
migrate plugin - pods violating topologyspread

updated to remove older params
Author: Julian Lawrence
Date:   2022-08-09 19:57:42 -07:00
parent 6d4abe88ca
commit 674bf4655d
8 changed files with 246 additions and 187 deletions

View File

@@ -90,3 +90,14 @@ type PodLifeTimeArgs struct {
 	MaxPodLifeTimeSeconds *uint
 	States                []string
 }
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RemovePodsViolatingTopologySpreadConstraintArgs holds arguments used to configure RemovePodsViolatingTopologySpreadConstraint plugin.
+type RemovePodsViolatingTopologySpreadConstraintArgs struct {
+	metav1.TypeMeta
+
+	Namespaces             *api.Namespaces
+	LabelSelector          *metav1.LabelSelector
+	IncludeSoftConstraints bool
+}
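
For orientation, a minimal sketch of how a caller could populate the new typed arguments directly (the field values below are illustrative, not taken from this commit; imports are the ones shown in the diff):

	args := &componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{
		Namespaces:             &api.Namespaces{Exclude: []string{"kube-system"}},
		LabelSelector:          &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		IncludeSoftConstraints: true, // also act on ScheduleAnyway constraints
	}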

View File

@@ -86,6 +86,14 @@ func ValidateRemoveDuplicatesArgs(args *componentconfig.RemoveDuplicatesArgs) er
 	return validateNamespaceArgs(args.Namespaces)
 }
 
+// ValidateRemovePodsViolatingTopologySpreadConstraintArgs validates RemovePodsViolatingTopologySpreadConstraint arguments
+func ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args *componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs) error {
+	return errorsAggregate(
+		validateNamespaceArgs(args.Namespaces),
+		validateLabelSelectorArgs(args.LabelSelector),
+	)
+}
+
 // errorsAggregate converts all arg validation errors to a single error interface.
 // if no errors, it will return nil.
 func errorsAggregate(errors ...error) error {
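
The new validator composes the two per-field checks through errorsAggregate. A minimal sketch of what such a helper can look like (assuming the standard errors and strings packages; the repository's actual implementation may differ):

	func errorsAggregate(errs ...error) error {
		var msgs []string
		for _, err := range errs {
			if err != nil {
				msgs = append(msgs, err.Error())
			}
		}
		if len(msgs) == 0 {
			return nil // no validation errors
		}
		return errors.New(strings.Join(msgs, ", ")) // single aggregated error
	}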

View File

@@ -298,3 +298,38 @@ func (in *RemovePodsViolatingNodeTaintsArgs) DeepCopyObject() runtime.Object {
 	}
 	return nil
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyInto(out *RemovePodsViolatingTopologySpreadConstraintArgs) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = new(api.Namespaces)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingTopologySpreadConstraintArgs.
+func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopy() *RemovePodsViolatingTopologySpreadConstraintArgs {
+	if in == nil {
+		return nil
+	}
+	out := new(RemovePodsViolatingTopologySpreadConstraintArgs)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
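
These generated methods are what let the args type satisfy apimachinery's runtime.Object interface. A hypothetical illustration of the round trip (assuming the imports shown above):

	var obj runtime.Object = &componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{
		IncludeSoftConstraints: true,
	}
	// DeepCopyObject returns an independent clone behind the interface.
	clone := obj.DeepCopyObject().(*componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs)
	clone.IncludeSoftConstraints = false // does not affect obj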

View File

@@ -252,7 +252,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
"RemovePodsViolatingNodeTaints": nil, "RemovePodsViolatingNodeTaints": nil,
"RemovePodsHavingTooManyRestarts": nil, "RemovePodsHavingTooManyRestarts": nil,
"PodLifeTime": nil, "PodLifeTime": nil,
"RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint, "RemovePodsViolatingTopologySpreadConstraint": nil,
"RemoveFailedPods": nil, "RemoveFailedPods": nil,
} }
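
Setting the entry to nil marks the strategy as migrated: it is no longer invoked through the legacy function table but dispatched via pluginsMap (shown in the next file). A rough sketch of that dispatch, with strategyFuncs as a hypothetical stand-in for the surrounding table:

	if fn, ok := strategyFuncs[name]; ok {
		if fn != nil {
			// not yet migrated: call the legacy strategy function directly
			fn(ctx, rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
		} else if pluginFn, ok := pluginsMap[string(name)]; ok {
			// migrated: run the plugin through its shim
			pluginFn(ctx, nodes, strategy.Params, handle)
		}
	}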

View File

@@ -31,6 +31,7 @@ import (
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts" "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity" "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints" "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
) )
// Once all strategies are migrated the arguments get read from the configuration file // Once all strategies are migrated the arguments get read from the configuration file
@@ -186,4 +187,24 @@ var pluginsMap = map[string]func(ctx context.Context, nodes []*v1.Node, params *
 			klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removeduplicates.PluginName)
 		}
 	},
+	"RemovePodsViolatingTopologySpreadConstraint": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
+		args := &componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{
+			Namespaces:             params.Namespaces,
+			LabelSelector:          params.LabelSelector,
+			IncludeSoftConstraints: params.IncludeSoftConstraints,
+		}
+		if err := validation.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
+			klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
+			return
+		}
+		pg, err := removepodsviolatingtopologyspreadconstraint.New(args, handle)
+		if err != nil {
+			klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
+			return
+		}
+		status := pg.(framework.BalancePlugin).Balance(ctx, nodes)
+		if status != nil && status.Err != nil {
+			klog.V(1).ErrorS(status.Err, "plugin finished with error", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
+		}
+	},
 }
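
This shim is where the flat legacy StrategyParameters get translated into the typed plugin args. For example, a policy that previously enabled soft constraints would arrive here roughly as (values illustrative):

	params := &api.StrategyParameters{
		IncludeSoftConstraints: true,
		Namespaces:             &api.Namespaces{Exclude: []string{"kube-system"}},
	}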

View File

@@ -1,12 +1,9 @@
 /*
 Copyright 2020 The Kubernetes Authors.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-
 http://www.apache.org/licenses/LICENSE-2.0
-
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -14,10 +11,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package strategies
+package removepodsviolatingtopologyspreadconstraint
 
 import (
 	"context"
+	"fmt"
 	"math"
 	"reflect"
 	"sort"
@@ -25,17 +23,20 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/node" "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation" "sigs.k8s.io/descheduler/pkg/framework"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
) )
const PluginName = "RemovePodsViolatingTopologySpreadConstraint"
// AntiAffinityTerm's topology key value used in predicate metadata // AntiAffinityTerm's topology key value used in predicate metadata
type topologyPair struct { type topologyPair struct {
key string key string
@@ -47,30 +48,44 @@ type topology struct {
 	pods []*v1.Pod
 }
 
-// nolint: gocyclo
-func RemovePodsViolatingTopologySpreadConstraint(
-	ctx context.Context,
-	client clientset.Interface,
-	strategy api.DeschedulerStrategy,
-	nodes []*v1.Node,
-	podEvictor *evictions.PodEvictor,
-	evictorFilter *evictions.EvictorFilter,
-	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
-) {
-	strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
+// RemovePodsViolatingTopologySpreadConstraint evicts pods which violate their topology spread constraints
+type RemovePodsViolatingTopologySpreadConstraint struct {
+	handle    framework.Handle
+	args      *componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs
+	podFilter podutil.FilterFunc
+}
+
+var _ framework.BalancePlugin = &RemovePodsViolatingTopologySpreadConstraint{}
+
+// New builds plugin from its arguments while passing a handle
+func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+	pluginArgs, ok := args.(*componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs)
+	if !ok {
+		return nil, fmt.Errorf("want args to be of type RemovePodsViolatingTopologySpreadConstraintArgs, got %T", args)
+	}
+
+	podFilter, err := podutil.NewOptions().
+		WithFilter(handle.Evictor().Filter).
+		WithLabelSelector(pluginArgs.LabelSelector).
+		BuildFilterFunc()
 	if err != nil {
-		klog.ErrorS(err, "Invalid RemovePodsViolatingTopologySpreadConstraint parameters")
-		return
+		return nil, fmt.Errorf("error initializing pod filter function: %v", err)
 	}
-	isEvictable := evictorFilter.Filter
 
-	if strategyParams.LabelSelector != nil && !strategyParams.LabelSelector.Empty() {
-		isEvictable = podutil.WrapFilterFuncs(isEvictable, func(pod *v1.Pod) bool {
-			return strategyParams.LabelSelector.Matches(labels.Set(pod.Labels))
-		})
-	}
+	return &RemovePodsViolatingTopologySpreadConstraint{
+		handle:    handle,
+		podFilter: podFilter,
+		args:      pluginArgs,
+	}, nil
+}
+
+// Name retrieves the plugin name
+func (d *RemovePodsViolatingTopologySpreadConstraint) Name() string {
+	return PluginName
+}
+
+// nolint: gocyclo
+func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
 	nodeMap := make(map[string]*v1.Node, len(nodes))
 	for _, node := range nodes {
 		nodeMap[node.Name] = node
@@ -89,17 +104,26 @@ func RemovePodsViolatingTopologySpreadConstraint(
 	// if diff > maxSkew, add this pod in the current bucket for eviction
 	// First record all of the constraints by namespace
+	client := d.handle.ClientSet()
 	namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		klog.ErrorS(err, "Couldn't list namespaces")
-		return
+		return &framework.Status{
+			Err: fmt.Errorf("list namespace: %w", err),
+		}
 	}
 	klog.V(1).InfoS("Processing namespaces for topology spread constraints")
 	podsForEviction := make(map[*v1.Pod]struct{})
 
+	var includedNamespaces, excludedNamespaces sets.String
+	if d.args.Namespaces != nil {
+		includedNamespaces = sets.NewString(d.args.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(d.args.Namespaces.Exclude...)
+	}
+
 	// 1. for each namespace...
 	for _, namespace := range namespaces.Items {
-		if (len(strategyParams.IncludedNamespaces) > 0 && !strategyParams.IncludedNamespaces.Has(namespace.Name)) ||
-			(len(strategyParams.ExcludedNamespaces) > 0 && strategyParams.ExcludedNamespaces.Has(namespace.Name)) {
+		if (len(includedNamespaces) > 0 && !includedNamespaces.Has(namespace.Name)) ||
+			(len(excludedNamespaces) > 0 && excludedNamespaces.Has(namespace.Name)) {
 			continue
 		}
 		namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
@@ -113,7 +137,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
 		for _, pod := range namespacePods.Items {
 			for _, constraint := range pod.Spec.TopologySpreadConstraints {
 				// Ignore soft topology constraints if they are not included
-				if constraint.WhenUnsatisfiable == v1.ScheduleAnyway && (strategy.Params == nil || !strategy.Params.IncludeSoftConstraints) {
+				if constraint.WhenUnsatisfiable == v1.ScheduleAnyway && (d.args == nil || !d.args.IncludeSoftConstraints) {
 					continue
 				}
 				// Need to check v1.TopologySpreadConstraint deepEquality because
@@ -179,7 +203,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
 				klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
 				continue
 			}
-			balanceDomains(getPodsAssignedToNode, podsForEviction, constraint, constraintTopologies, sumPods, evictorFilter.Filter, nodes)
+			balanceDomains(d.handle.GetPodsAssignedToNodeFunc(), podsForEviction, constraint, constraintTopologies, sumPods, d.handle.Evictor().Filter, nodes)
 		}
 	}
@@ -188,14 +212,16 @@ func RemovePodsViolatingTopologySpreadConstraint(
 		if nodeLimitExceeded[pod.Spec.NodeName] {
 			continue
 		}
-		if !isEvictable(pod) {
+		if !d.podFilter(pod) {
 			continue
 		}
-		podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{})
-		if podEvictor.NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
+		d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
+		if d.handle.Evictor().NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
 			nodeLimitExceeded[pod.Spec.NodeName] = true
 		}
 	}
+
+	return nil
 }
 
 // hasIdenticalConstraints checks if we already had an identical TopologySpreadConstraint in namespaceTopologySpreadConstraints slice
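
As a back-of-the-envelope check on the skew arithmetic referenced in the comments above (balanceDomains does the real calculation; this helper is illustrative only):

	// movesNeeded estimates how many pods must leave the larger of two
	// domains so the pair satisfies maxSkew.
	func movesNeeded(large, small, maxSkew int) int {
		skew := large - small
		if skew <= maxSkew {
			return 0
		}
		return (skew - maxSkew + 1) / 2 // ceil((skew-maxSkew)/2)
	}

	// movesNeeded(3, 1, 1) == 1 → [3,1] becomes [2,2]
	// movesNeeded(4, 0, 1) == 2 → [4,0] becomes [2,2]
	// movesNeeded(6, 2, 2) == 1 → [2,6] becomes [3,5]

These values line up with the expectations in the unit test file further below.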

View File

@@ -1,4 +1,4 @@
-package strategies
+package removepodsviolatingtopologyspreadconstraint
 
 import (
 	"context"
@@ -16,8 +16,11 @@ import (
"k8s.io/client-go/tools/events" "k8s.io/client-go/tools/events"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
@@ -28,8 +31,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 		expectedEvictedCount uint
 		expectedEvictedPods  []string // if specified, will assert specific pods were evicted
 		nodes                []*v1.Node
-		strategy             api.DeschedulerStrategy
 		namespaces           []string
+		args                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs
+		nodeFit              bool
 	}{
 		{
 			name: "2 domains, sizes [2,1], maxSkew=1, move 0 pods",
@@ -63,12 +67,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2]",
@@ -95,12 +95,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2] (soft constraints)",
@@ -134,8 +130,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy:             api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}},
 			namespaces:           []string{"ns1"},
+			args:                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{IncludeSoftConstraints: true},
 		},
 		{
 			name: "2 domains, sizes [3,1], maxSkew=1, no pods eligible, move 0 pods",
@@ -165,12 +161,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2], exclude kube-system namespace",
@@ -197,8 +189,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy:             api.DeschedulerStrategy{Enabled: true, Params: &api.StrategyParameters{NodeFit: true, Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}}}},
 			namespaces:           []string{"ns1"},
+			args:                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}}},
+			nodeFit:              true,
 		},
 		{
 			name: "2 domains, sizes [5,2], maxSkew=1, move 1 pod to achieve [4,3]",
@@ -225,12 +218,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods to achieve [2,2]",
@@ -252,12 +241,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 2,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains, sizes [4,0], maxSkew=1, only move 1 pod since pods with nodeSelector and nodeAffinity aren't evicted",
@@ -296,12 +281,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: true,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:    true,
 		},
 		{
 			name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods since selector matches multiple nodes",
@@ -343,12 +325,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 2,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "3 domains, sizes [0, 1, 100], maxSkew=1, move 66 pods to get [34, 33, 34]",
@@ -371,8 +349,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 66,
-			strategy:             api.DeschedulerStrategy{},
 			namespaces:           []string{"ns1"},
+			args:                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "4 domains, sizes [0, 1, 3, 5], should move 3 to get [2, 2, 3, 2]",
@@ -401,12 +379,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 3,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains size [2 6], maxSkew=2, should move 1 to get [3 5]",
@@ -433,12 +407,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains size [2 6], maxSkew=2, can't move any because of node taints",
@@ -481,12 +451,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: true,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:    true,
 		},
 		{
 			name: "2 domains size [2 6], maxSkew=2, can't move any because node1 does not have enough CPU",
@@ -513,12 +480,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: true,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:    true,
 		},
 		{
 			// see https://github.com/kubernetes-sigs/descheduler/issues/564
@@ -622,8 +586,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy:             api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}},
 			namespaces:           []string{"ns1"},
+			args:                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{IncludeSoftConstraints: true},
 		},
 		{
 			name: "3 domains size [8 7 0], maxSkew=1, should move 5 to get [5 5 5]",
@@ -648,8 +612,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}),
 			expectedEvictedCount: 5,
 			expectedEvictedPods:  []string{"pod-5", "pod-6", "pod-7", "pod-8"},
-			strategy:             api.DeschedulerStrategy{},
 			namespaces:           []string{"ns1"},
+			args:                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "3 domains size [5 5 5], maxSkew=1, should move 0 to retain [5 5 5]",
@@ -679,8 +643,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy:             api.DeschedulerStrategy{},
 			namespaces:           []string{"ns1"},
+			args:                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod since pod tolerates the node with taint",
@@ -721,8 +685,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}),
 			expectedEvictedCount: 1,
 			expectedEvictedPods:  []string{"pod-0"},
-			strategy:             api.DeschedulerStrategy{},
 			namespaces:           []string{"ns1"},
+			args:                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods since pod does not tolerate the tainted node",
@@ -754,8 +718,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy:             api.DeschedulerStrategy{},
 			namespaces:           []string{"ns1"},
+			args:                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods since pod does not tolerate the tainted node, and NodeFit is enabled",
@@ -787,12 +751,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: true,
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:    true,
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with PreferNoSchedule Taint",
@@ -825,8 +786,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}),
 			expectedEvictedCount: 1,
 			expectedEvictedPods:  []string{"pod-0"},
-			strategy:             api.DeschedulerStrategy{},
 			namespaces:           []string{"ns1"},
+			args:                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 0 pod for node with unmatched label filtering",
@@ -848,12 +809,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpIn),
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpIn)},
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with matched label filtering",
@@ -876,12 +833,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}),
 			expectedEvictedCount: 1,
 			expectedEvictedPods:  []string{"pod-1"},
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn)},
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with matched label filtering (NotIn op)",
@@ -904,12 +857,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}),
 			expectedEvictedCount: 1,
 			expectedEvictedPods:  []string{"pod-1"},
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn),
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
 		},
 		{
 			name: "2 domains, sizes [4,2], maxSkew=1, 2 pods in termination; nothing should be moved",
@@ -939,12 +888,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
-				},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn)},
 		},
 		{
 			name: "3 domains, sizes [2,3,4], maxSkew=1, NodeFit is enabled, and not enough cpu on zoneA; nothing should be moved",
@@ -974,10 +919,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{NodeFit: true},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:    true,
 		},
 		{
 			name: "3 domains, sizes [[1,0], [1,1], [2,1]], maxSkew=1, NodeFit is enabled, and not enough cpu on ZoneA; nothing should be moved",
@@ -1022,10 +966,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{NodeFit: true},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:    true,
 		},
 		{
 			name: "2 domains, sizes [[1,4], [2,1]], maxSkew=1, NodeFit is enabled; should move 1",
@@ -1063,10 +1006,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}),
 			expectedEvictedCount: 1,
 			expectedEvictedPods:  []string{"pod-4"},
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{NodeFit: true},
-			},
 			namespaces: []string{"ns1"},
+			args:       componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:    true,
 		},
 		{
 			// https://github.com/kubernetes-sigs/descheduler/issues/839
@@ -1171,8 +1113,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy:             api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
 			namespaces:           []string{"ns1"},
+			args:                 componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:              true,
 		},
 	}
@@ -1229,22 +1172,31 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				eventRecorder,
 			)
 
-			nodeFit := false
-			if tc.strategy.Params != nil {
-				nodeFit = tc.strategy.Params.NodeFit
+			handle := &frameworkfake.HandleImpl{
+				ClientsetImpl:                 fakeClient,
+				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+				PodEvictorImpl:                podEvictor,
+				EvictorFilterImpl: evictions.NewEvictorFilter(
+					tc.nodes,
+					getPodsAssignedToNode,
+					false,
+					false,
+					false,
+					false,
+					evictions.WithNodeFit(tc.nodeFit),
+				),
+				SharedInformerFactoryImpl: sharedInformerFactory,
 			}
 
-			evictorFilter := evictions.NewEvictorFilter(
-				tc.nodes,
-				getPodsAssignedToNode,
-				false,
-				false,
-				false,
-				false,
-				evictions.WithNodeFit(nodeFit),
+			plugin, err := New(
+				&tc.args,
+				handle,
 			)
+			if err != nil {
+				t.Fatalf("Unable to initialize the plugin: %v", err)
+			}
 
-			RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
+			plugin.(framework.BalancePlugin).Balance(ctx, tc.nodes)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != tc.expectedEvictedCount {
 				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
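
With the table-driven test now living in the plugin's own package, it can be run in isolation with the standard Go toolchain, e.g. go test ./pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/ (the package path is assumed from the import added above).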

View File

@@ -10,9 +10,11 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies" "sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
) )
const zoneTopologyKey string = "topology.kubernetes.io/zone" const zoneTopologyKey string = "topology.kubernetes.io/zone"
@@ -82,27 +84,31 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			// Run TopologySpreadConstraint strategy
 			t.Logf("Running RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
 
-			strategies.RemovePodsViolatingTopologySpreadConstraint(
-				ctx,
-				clientSet,
-				deschedulerapi.DeschedulerStrategy{
-					Enabled: true,
-					Params: &deschedulerapi.StrategyParameters{
-						IncludeSoftConstraints: tc.constraint != v1.DoNotSchedule,
-					},
-				},
+			filter := evictions.NewEvictorFilter(
 				nodes,
-				podEvictor,
-				evictions.NewEvictorFilter(
-					nodes,
-					getPodsAssignedToNode,
-					true,
-					false,
-					false,
-					false,
-				),
 				getPodsAssignedToNode,
+				true,
+				false,
+				false,
+				false,
 			)
+
+			plugin, err := removepodsviolatingtopologyspreadconstraint.New(&componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{
+				IncludeSoftConstraints: tc.constraint != v1.DoNotSchedule,
+			},
+				&frameworkfake.HandleImpl{
+					ClientsetImpl:                 clientSet,
+					PodEvictorImpl:                podEvictor,
+					EvictorFilterImpl:             filter,
+					GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+				},
+			)
+			if err != nil {
+				t.Fatalf("Unable to initialize the plugin: %v", err)
+			}
+
+			plugin.(framework.BalancePlugin).Balance(ctx, workerNodes)
 			t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
 
 			t.Logf("Wait for terminating pods of %s to disappear", name)