mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-25 20:59:28 +01:00

migrate plugin - pods violating topologyspread

updated to remove older params
Julian Lawrence
2022-08-09 19:57:42 -07:00
parent 6d4abe88ca
commit 674bf4655d
8 changed files with 246 additions and 187 deletions

View File

@@ -90,3 +90,14 @@ type PodLifeTimeArgs struct {
MaxPodLifeTimeSeconds *uint
States []string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsViolatingTopologySpreadConstraintArgs holds arguments used to configure RemovePodsViolatingTopologySpreadConstraint plugin.
type RemovePodsViolatingTopologySpreadConstraintArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
IncludeSoftConstraints bool
}
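A minimal sketch of constructing the new args type (the field values are illustrative; the policy wiring that would normally populate them is outside this diff):

args := &componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{
	Namespaces:             &api.Namespaces{Exclude: []string{"kube-system"}},
	LabelSelector:          nil,  // nil selects all pods
	IncludeSoftConstraints: true, // also evaluate ScheduleAnyway constraints
}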

View File

@@ -86,6 +86,14 @@ func ValidateRemoveDuplicatesArgs(args *componentconfig.RemoveDuplicatesArgs) er
return validateNamespaceArgs(args.Namespaces)
}
// ValidateRemovePodsViolatingTopologySpreadConstraintArgs validates RemovePodsViolatingTopologySpreadConstraint arguments
func ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args *componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// errorsAggregate converts all arg validation errors to a single error interface.
// If there are no errors, it returns nil.
func errorsAggregate(errors ...error) error {
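The hunk ends before the helper's body. A plausible sketch of such an aggregator, assuming it simply drops nil errors and joins the remaining messages (an illustration requiring fmt and strings, not the committed body):

func errorsAggregate(errors ...error) error {
	var msgs []string
	for _, err := range errors {
		if err != nil {
			msgs = append(msgs, err.Error())
		}
	}
	if len(msgs) == 0 {
		return nil
	}
	return fmt.Errorf("%s", strings.Join(msgs, "; "))
}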

View File

@@ -298,3 +298,38 @@ func (in *RemovePodsViolatingNodeTaintsArgs) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyInto(out *RemovePodsViolatingTopologySpreadConstraintArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingTopologySpreadConstraintArgs.
func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopy() *RemovePodsViolatingTopologySpreadConstraintArgs {
if in == nil {
return nil
}
out := new(RemovePodsViolatingTopologySpreadConstraintArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
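A short sketch of the copy semantics these generated functions provide (illustrative values):

in := &RemovePodsViolatingTopologySpreadConstraintArgs{
	Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}},
}
out := in.DeepCopy()
out.Namespaces.Exclude = append(out.Namespaces.Exclude, "ns1")
// in.Namespaces still lists only "kube-system": DeepCopyInto allocated
// a fresh api.Namespaces rather than sharing the pointer.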

View File

@@ -252,7 +252,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
"RemovePodsViolatingNodeTaints": nil,
"RemovePodsHavingTooManyRestarts": nil,
"PodLifeTime": nil,
"RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint,
"RemovePodsViolatingTopologySpreadConstraint": nil,
"RemoveFailedPods": nil,
}
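Setting the entry to nil marks the strategy as migrated. A hypothetical sketch of how such a nil sentinel is typically consumed (the dispatch site, and the names strategyFuncs and handle, are assumptions; they are not part of this diff):

// hypothetical dispatch: a nil legacy entry falls through to pluginsMap
if fn := strategyFuncs[name]; fn != nil {
	fn(ctx, client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
} else if pluginFn, ok := pluginsMap[name]; ok {
	pluginFn(ctx, nodes, strategy.Params, handle)
}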

View File

@@ -31,6 +31,7 @@ import (
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)
// Once all strategies are migrated, the arguments will be read from the configuration file
@@ -186,4 +187,24 @@ var pluginsMap = map[string]func(ctx context.Context, nodes []*v1.Node, params *
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removeduplicates.PluginName)
}
},
"RemovePodsViolatingTopologySpreadConstraint": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
args := &componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{
Namespaces: params.Namespaces,
LabelSelector: params.LabelSelector,
IncludeSoftConstraints: params.IncludePreferNoSchedule,
}
if err := validation.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
return
}
pg, err := removepodsviolatingtopologyspreadconstraint.New(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
return
}
status := pg.(framework.BalancePlugin).Balance(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
}
},
}

View File

@@ -1,12 +1,9 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -14,10 +11,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
package removepodsviolatingtopologyspreadconstraint
import (
"context"
"fmt"
"math"
"reflect"
"sort"
@@ -25,17 +23,20 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
"sigs.k8s.io/descheduler/pkg/framework"
"sigs.k8s.io/descheduler/pkg/utils"
)
const PluginName = "RemovePodsViolatingTopologySpreadConstraint"
// topologyPair identifies a single topology domain by its key/value pair
type topologyPair struct {
key string
@@ -47,30 +48,44 @@ type topology struct {
pods []*v1.Pod
}
// nolint: gocyclo
func RemovePodsViolatingTopologySpreadConstraint(
ctx context.Context,
client clientset.Interface,
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
podEvictor *evictions.PodEvictor,
evictorFilter *evictions.EvictorFilter,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
// RemovePodsViolatingTopologySpreadConstraint evicts pods which violate their topology spread constraints
type RemovePodsViolatingTopologySpreadConstraint struct {
handle framework.Handle
args *componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs
podFilter podutil.FilterFunc
}
var _ framework.BalancePlugin = &RemovePodsViolatingTopologySpreadConstraint{}
// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
pluginArgs, ok := args.(*componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingTopologySpreadConstraintArgs, got %T", args)
}
podFilter, err := podutil.NewOptions().
WithFilter(handle.Evictor().Filter).
WithLabelSelector(pluginArgs.LabelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingTopologySpreadConstraint parameters")
return
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
}
isEvictable := evictorFilter.Filter
return &RemovePodsViolatingTopologySpreadConstraint{
handle: handle,
podFilter: podFilter,
args: pluginArgs,
}, nil
}
if strategyParams.LabelSelector != nil && !strategyParams.LabelSelector.Empty() {
isEvictable = podutil.WrapFilterFuncs(isEvictable, func(pod *v1.Pod) bool {
return strategyParams.LabelSelector.Matches(labels.Set(pod.Labels))
})
}
// Name retrieves the plugin name
func (d *RemovePodsViolatingTopologySpreadConstraint) Name() string {
return PluginName
}
// nolint: gocyclo
func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
nodeMap := make(map[string]*v1.Node, len(nodes))
for _, node := range nodes {
nodeMap[node.Name] = node
@@ -89,17 +104,26 @@ func RemovePodsViolatingTopologySpreadConstraint(
// if diff > maxSkew, add this pod in the current bucket for eviction
// First record all of the constraints by namespace
client := d.handle.ClientSet()
namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
klog.ErrorS(err, "Couldn't list namespaces")
return
return &framework.Status{
Err: fmt.Errorf("list namespace: %w", err),
}
}
klog.V(1).InfoS("Processing namespaces for topology spread constraints")
podsForEviction := make(map[*v1.Pod]struct{})
var includedNamespaces, excludedNamespaces sets.String
if d.args.Namespaces != nil {
includedNamespaces = sets.NewString(d.args.Namespaces.Include...)
excludedNamespaces = sets.NewString(d.args.Namespaces.Exclude...)
}
// 1. for each namespace...
for _, namespace := range namespaces.Items {
if (len(strategyParams.IncludedNamespaces) > 0 && !strategyParams.IncludedNamespaces.Has(namespace.Name)) ||
(len(strategyParams.ExcludedNamespaces) > 0 && strategyParams.ExcludedNamespaces.Has(namespace.Name)) {
if (len(includedNamespaces) > 0 && !includedNamespaces.Has(namespace.Name)) ||
(len(excludedNamespaces) > 0 && excludedNamespaces.Has(namespace.Name)) {
continue
}
namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
@@ -113,7 +137,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
for _, pod := range namespacePods.Items {
for _, constraint := range pod.Spec.TopologySpreadConstraints {
// Ignore soft topology constraints if they are not included
if constraint.WhenUnsatisfiable == v1.ScheduleAnyway && (strategy.Params == nil || !strategy.Params.IncludeSoftConstraints) {
if constraint.WhenUnsatisfiable == v1.ScheduleAnyway && (d.args == nil || !d.args.IncludeSoftConstraints) {
continue
}
// Need to check v1.TopologySpreadConstraint deepEquality because
@@ -179,7 +203,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
continue
}
balanceDomains(getPodsAssignedToNode, podsForEviction, constraint, constraintTopologies, sumPods, evictorFilter.Filter, nodes)
balanceDomains(d.handle.GetPodsAssignedToNodeFunc(), podsForEviction, constraint, constraintTopologies, sumPods, d.handle.Evictor().Filter, nodes)
}
}
@@ -188,14 +212,16 @@ func RemovePodsViolatingTopologySpreadConstraint(
if nodeLimitExceeded[pod.Spec.NodeName] {
continue
}
if !isEvictable(pod) {
if !d.podFilter(pod) {
continue
}
podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{})
if podEvictor.NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
if d.handle.Evictor().NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
nodeLimitExceeded[pod.Spec.NodeName] = true
}
}
return nil
}
// hasIdenticalConstraints checks whether an identical TopologySpreadConstraint already exists in the namespaceTopologySpreadConstraints slice

View File

@@ -1,4 +1,4 @@
package strategies
package removepodsviolatingtopologyspreadconstraint
import (
"context"
@@ -16,8 +16,11 @@ import (
"k8s.io/client-go/tools/events"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/test"
)
@@ -28,8 +31,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
expectedEvictedCount uint
expectedEvictedPods []string // if specified, will assert specific pods were evicted
nodes []*v1.Node
strategy api.DeschedulerStrategy
namespaces []string
args componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs
nodeFit bool
}{
{
name: "2 domains, sizes [2,1], maxSkew=1, move 0 pods",
@@ -63,12 +67,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: false,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2]",
@@ -95,12 +95,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: false,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2] (soft constraints)",
@@ -134,8 +130,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{IncludeSoftConstraints: true},
},
{
name: "2 domains, sizes [3,1], maxSkew=1, no pods eligible, move 0 pods",
@@ -165,12 +161,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: false,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2], exclude kube-system namespace",
@@ -197,8 +189,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{Enabled: true, Params: &api.StrategyParameters{NodeFit: true, Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}}}},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}}},
nodeFit: true,
},
{
name: "2 domains, sizes [5,2], maxSkew=1, move 1 pod to achieve [4,3]",
@@ -225,12 +218,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: false,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods to achieve [2,2]",
@@ -252,12 +241,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 2,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: false,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains, sizes [4,0], maxSkew=1, only move 1 pod since pods with nodeSelector and nodeAffinity aren't evicted",
@@ -296,12 +281,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: true,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
nodeFit: true,
},
{
name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods since selector matches multiple nodes",
@@ -343,12 +325,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 2,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: false,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "3 domains, sizes [0, 1, 100], maxSkew=1, move 66 pods to get [34, 33, 34]",
@@ -371,8 +349,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 66,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "4 domains, sizes [0, 1, 3, 5], should move 3 to get [2, 2, 3, 2]",
@@ -401,12 +379,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 3,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: false,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains size [2 6], maxSkew=2, should move 1 to get [3 5]",
@@ -433,12 +407,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: false,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains size [2 6], maxSkew=2, can't move any because of node taints",
@@ -481,12 +451,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: true,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
nodeFit: true,
},
{
name: "2 domains size [2 6], maxSkew=2, can't move any because node1 does not have enough CPU",
@@ -513,12 +480,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: true,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
nodeFit: true,
},
{
// see https://github.com/kubernetes-sigs/descheduler/issues/564
@@ -622,8 +586,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{IncludeSoftConstraints: true},
},
{
name: "3 domains size [8 7 0], maxSkew=1, should move 5 to get [5 5 5]",
@@ -648,8 +612,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
}),
expectedEvictedCount: 5,
expectedEvictedPods: []string{"pod-5", "pod-6", "pod-7", "pod-8"},
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "3 domains size [5 5 5], maxSkew=1, should move 0 to retain [5 5 5]",
@@ -679,8 +643,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod since pod tolerates the node with taint",
@@ -721,8 +685,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
}),
expectedEvictedCount: 1,
expectedEvictedPods: []string{"pod-0"},
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods since pod does not tolerate the tainted node",
@@ -754,8 +718,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods since pod does not tolerate the tainted node, and NodeFit is enabled",
@@ -787,12 +751,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: true,
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
nodeFit: true,
},
{
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with PreferNoSchedule Taint",
@@ -825,8 +786,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
}),
expectedEvictedCount: 1,
expectedEvictedPods: []string{"pod-0"},
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pod for node with unmatched label filtering",
@@ -848,12 +809,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpIn),
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpIn)},
},
{
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with matched label filtering",
@@ -876,12 +833,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
}),
expectedEvictedCount: 1,
expectedEvictedPods: []string{"pod-1"},
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn)},
},
{
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with matched label filtering (NotIn op)",
@@ -904,12 +857,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
}),
expectedEvictedCount: 1,
expectedEvictedPods: []string{"pod-1"},
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn),
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
},
{
name: "2 domains, sizes [4,2], maxSkew=1, 2 pods in termination; nothing should be moved",
@@ -939,12 +888,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn)},
},
{
name: "3 domains, sizes [2,3,4], maxSkew=1, NodeFit is enabled, and not enough cpu on zoneA; nothing should be moved",
@@ -974,10 +919,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{NodeFit: true},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
nodeFit: true,
},
{
name: "3 domains, sizes [[1,0], [1,1], [2,1]], maxSkew=1, NodeFit is enabled, and not enough cpu on ZoneA; nothing should be moved",
@@ -1022,10 +966,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{NodeFit: true},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
nodeFit: true,
},
{
name: "2 domains, sizes [[1,4], [2,1]], maxSkew=1, NodeFit is enabled; should move 1",
@@ -1063,10 +1006,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
}),
expectedEvictedCount: 1,
expectedEvictedPods: []string{"pod-4"},
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{NodeFit: true},
},
namespaces: []string{"ns1"},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
nodeFit: true,
},
{
// https://github.com/kubernetes-sigs/descheduler/issues/839
@@ -1171,8 +1113,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
namespaces: []string{"ns1"},
args: componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
nodeFit: true,
},
}
@@ -1229,22 +1172,31 @@ func TestTopologySpreadConstraint(t *testing.T) {
eventRecorder,
)
nodeFit := false
if tc.strategy.Params != nil {
nodeFit = tc.strategy.Params.NodeFit
handle := &frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
evictions.WithNodeFit(tc.nodeFit),
),
SharedInformerFactoryImpl: sharedInformerFactory,
}
evictorFilter := evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
evictions.WithNodeFit(nodeFit),
plugin, err := New(
&tc.args,
handle,
)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
plugin.(framework.BalancePlugin).Balance(ctx, tc.nodes)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)

View File

@@ -10,9 +10,11 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)
const zoneTopologyKey string = "topology.kubernetes.io/zone"
@@ -82,27 +84,31 @@ func TestTopologySpreadConstraint(t *testing.T) {
// Run TopologySpreadConstraint strategy
t.Logf("Running RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
strategies.RemovePodsViolatingTopologySpreadConstraint(
ctx,
clientSet,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
IncludeSoftConstraints: tc.constraint != v1.DoNotSchedule,
},
},
filter := evictions.NewEvictorFilter(
nodes,
podEvictor,
evictions.NewEvictorFilter(
nodes,
getPodsAssignedToNode,
true,
false,
false,
false,
),
getPodsAssignedToNode,
true,
false,
false,
false,
)
plugin, err := removepodsviolatingtopologyspreadconstraint.New(&componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{
IncludeSoftConstraints: tc.constraint != v1.DoNotSchedule,
},
&frameworkfake.HandleImpl{
ClientsetImpl: clientSet,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: filter,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
},
)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
plugin.(framework.BalancePlugin).Balance(ctx, workerNodes)
t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
t.Logf("Wait for terminating pods of %s to disappear", name)