diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go
index f1bd61a81..c03f10a29 100644
--- a/pkg/descheduler/descheduler.go
+++ b/pkg/descheduler/descheduler.go
@@ -45,6 +45,7 @@ import (
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 func Run(ctx context.Context, rs *options.DeschedulerServer) error {
@@ -87,7 +88,7 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 	return runFn()
 }
 
-type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
+type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
 
 func cachedClient(
 	realClient clientset.Interface,
@@ -283,18 +284,25 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 		deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
 		deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
 		nodes,
-		getPodsAssignedToNode,
-		evictLocalStoragePods,
-		evictSystemCriticalPods,
-		ignorePvcPods,
-		evictBarePods,
 		!rs.DisableMetrics,
 	)
 
 	for name, strategy := range deschedulerPolicy.Strategies {
 		if f, ok := strategyFuncs[name]; ok {
 			if strategy.Enabled {
-				f(ctx, rs.Client, strategy, nodes, podEvictor, getPodsAssignedToNode)
+				nodeFit := false
+				if name != "PodLifeTime" {
+					if strategy.Params != nil {
+						nodeFit = strategy.Params.NodeFit
+					}
+				}
+				thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, rs.Client, strategy.Params)
+				if err != nil {
+					klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
+					continue
+				}
+				evictorFilter := evictions.NewEvictorFilter(nodes, getPodsAssignedToNode, evictLocalStoragePods, evictSystemCriticalPods, ignorePvcPods, evictBarePods, evictions.WithNodeFit(nodeFit), evictions.WithPriorityThreshold(thresholdPriority))
+				f(ctx, rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			}
 		} else {
 			klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
diff --git a/pkg/descheduler/evictions/evictions.go b/pkg/descheduler/evictions/evictions.go
index 742388aba..39f5ef6e8 100644
--- a/pkg/descheduler/evictions/evictions.go
+++ b/pkg/descheduler/evictions/evictions.go
@@ -51,17 +51,12 @@ type namespacePodEvictCount map[string]uint
 
 type PodEvictor struct {
 	client                     clientset.Interface
 	nodes                      []*v1.Node
-	nodeIndexer                podutil.GetPodsAssignedToNodeFunc
 	policyGroupVersion         string
 	dryRun                     bool
 	maxPodsToEvictPerNode      *uint
 	maxPodsToEvictPerNamespace *uint
 	nodepodCount               nodePodEvictedCount
 	namespacePodCount          namespacePodEvictCount
-	evictFailedBarePods        bool
-	evictLocalStoragePods      bool
-	evictSystemCriticalPods    bool
-	ignorePvcPods              bool
 	metricsEnabled             bool
 }
@@ -72,11 +67,6 @@ func NewPodEvictor(
 	maxPodsToEvictPerNode *uint,
 	maxPodsToEvictPerNamespace *uint,
 	nodes []*v1.Node,
-	nodeIndexer podutil.GetPodsAssignedToNodeFunc,
-	evictLocalStoragePods bool,
-	evictSystemCriticalPods bool,
-	ignorePvcPods bool,
-	evictFailedBarePods bool,
 	metricsEnabled bool,
 ) *PodEvictor {
 	var nodePodCount = make(nodePodEvictedCount)
@@ -89,17 +79,12 @@
 	return &PodEvictor{
 		client:                     client,
 		nodes:                      nodes,
-		nodeIndexer:                nodeIndexer,
 		policyGroupVersion:         policyGroupVersion,
 		dryRun:                     dryRun,
 		maxPodsToEvictPerNode:      maxPodsToEvictPerNode,
 		maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
 		nodepodCount:               nodePodCount,
 		namespacePodCount:          namespacePodCount,
-		evictLocalStoragePods:      evictLocalStoragePods,
-		evictSystemCriticalPods:    evictSystemCriticalPods,
-		evictFailedBarePods:        evictFailedBarePods,
-		ignorePvcPods:              ignorePvcPods,
 		metricsEnabled:             metricsEnabled,
 	}
 }
@@ -230,21 +215,26 @@ func WithLabelSelector(labelSelector labels.Selector) func(opts *Options) {
 
 type constraint func(pod *v1.Pod) error
 
-type evictable struct {
+type EvictorFilter struct {
 	constraints []constraint
 }
 
-// Evictable provides an implementation of IsEvictable(IsEvictable(pod *v1.Pod) bool).
-// The method accepts a list of options which allow to extend constraints
-// which decides when a pod is considered evictable.
-func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
+func NewEvictorFilter(
+	nodes []*v1.Node,
+	nodeIndexer podutil.GetPodsAssignedToNodeFunc,
+	evictLocalStoragePods bool,
+	evictSystemCriticalPods bool,
+	ignorePvcPods bool,
+	evictFailedBarePods bool,
+	opts ...func(opts *Options),
+) *EvictorFilter {
 	options := &Options{}
 	for _, opt := range opts {
 		opt(options)
 	}
 
-	ev := &evictable{}
-	if pe.evictFailedBarePods {
+	ev := &EvictorFilter{}
+	if evictFailedBarePods {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			ownerRefList := podutil.OwnerRef(pod)
 			// Enable evictFailedBarePods to evict bare pods in failed phase
@@ -263,7 +253,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
 			return nil
 		})
 	}
-	if !pe.evictSystemCriticalPods {
+	if !evictSystemCriticalPods {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			// Moved from IsEvictable function to allow for disabling
 			if utils.IsCriticalPriorityPod(pod) {
@@ -281,7 +271,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
 			})
 		}
 	}
-	if !pe.evictLocalStoragePods {
+	if !evictLocalStoragePods {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			if utils.IsPodWithLocalStorage(pod) {
 				return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
@@ -289,7 +279,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
 			return nil
 		})
 	}
-	if pe.ignorePvcPods {
+	if ignorePvcPods {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			if utils.IsPodWithPVC(pod) {
 				return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
@@ -299,7 +289,7 @@
 	}
 	if options.nodeFit {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
-			if !nodeutil.PodFitsAnyOtherNode(pe.nodeIndexer, pod, pe.nodes) {
+			if !nodeutil.PodFitsAnyOtherNode(nodeIndexer, pod, nodes) {
 				return fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable")
 			}
 			return nil
@@ -318,7 +308,7 @@
 }
 
 // IsEvictable decides when a pod is evictable
-func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
+func (ef *EvictorFilter) Filter(pod *v1.Pod) bool {
 	checkErrs := []error{}
 
 	ownerRefList := podutil.OwnerRef(pod)
@@ -338,7 +328,7 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
 		checkErrs = append(checkErrs, fmt.Errorf("pod is terminating"))
 	}
 
-	for _, c := range ev.constraints {
+	for _, c := range ef.constraints {
 		if err := c(pod); err != nil {
 			checkErrs = append(checkErrs, err)
 		}
diff --git a/pkg/descheduler/evictions/evictions_test.go b/pkg/descheduler/evictions/evictions_test.go
index 131349f41..ffae40a38 100644
--- a/pkg/descheduler/evictions/evictions_test.go
+++ b/pkg/descheduler/evictions/evictions_test.go
@@ -21,7 +21,6 @@ import (
 	"testing"
 
 	v1 "k8s.io/api/core/v1"
-	policyv1 "k8s.io/api/policy/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/informers"
@@ -639,19 +638,6 @@ func TestIsEvictable(t *testing.T) {
 			sharedInformerFactory.Start(ctx.Done())
 			sharedInformerFactory.WaitForCacheSync(ctx.Done())
 
-			podEvictor := &PodEvictor{
-				client:                     fakeClient,
-				nodes:                      nodes,
-				nodeIndexer:                getPodsAssignedToNode,
-				policyGroupVersion:         policyv1.SchemeGroupVersion.String(),
-				dryRun:                     false,
-				maxPodsToEvictPerNode:      nil,
-				maxPodsToEvictPerNamespace: nil,
-				evictLocalStoragePods:      test.evictLocalStoragePods,
-				evictSystemCriticalPods:    test.evictSystemCriticalPods,
-				evictFailedBarePods:        test.evictFailedBarePods,
-			}
-
 			var opts []func(opts *Options)
 			if test.priorityThreshold != nil {
 				opts = append(opts, WithPriorityThreshold(*test.priorityThreshold))
@@ -659,9 +645,18 @@ func TestIsEvictable(t *testing.T) {
 			if test.nodeFit {
 				opts = append(opts, WithNodeFit(true))
 			}
 
-			evictable := podEvictor.Evictable(opts...)
-			result := evictable.IsEvictable(test.pods[0])
+			evictorFilter := NewEvictorFilter(
+				nodes,
+				getPodsAssignedToNode,
+				test.evictLocalStoragePods,
+				test.evictSystemCriticalPods,
+				false,
+				test.evictFailedBarePods,
+				opts...,
+			)
+
+			result := evictorFilter.Filter(test.pods[0])
 			if result != test.result {
 				t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
 			}
diff --git a/pkg/descheduler/strategies/duplicates.go b/pkg/descheduler/strategies/duplicates.go
index 462fb36f6..d5e11f0f5 100644
--- a/pkg/descheduler/strategies/duplicates.go
+++ b/pkg/descheduler/strategies/duplicates.go
@@ -66,17 +66,13 @@ func RemoveDuplicatePods(
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	evictorFilter *evictions.EvictorFilter,
 	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
 		return
 	}
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
 
 	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params != nil && strategy.Params.Namespaces != nil {
@@ -84,20 +80,13 @@ func RemoveDuplicatePods(
 		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	duplicatePods := make(map[podOwner]map[string][]*v1.Pod)
 	ownerKeyOccurence := make(map[podOwner]int32)
 	nodeCount := 0
 	nodeMap := make(map[string]*v1.Node)
 
 	podFilter, err := podutil.NewOptions().
-		WithFilter(evictable.IsEvictable).
+		WithFilter(evictorFilter.Filter).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		BuildFilterFunc()
diff --git a/pkg/descheduler/strategies/duplicates_test.go b/pkg/descheduler/strategies/duplicates_test.go
index 1c5e4d695..b4ab8537b 100644
--- a/pkg/descheduler/strategies/duplicates_test.go
+++ b/pkg/descheduler/strategies/duplicates_test.go
@@ -319,16 +319,26 @@ func TestFindDuplicatePods(t *testing.T) {
 				false,
 				nil,
 				nil,
+				testCase.nodes,
+				false,
+			)
+
+			nodeFit := false
+			if testCase.strategy.Params != nil {
+				nodeFit = testCase.strategy.Params.NodeFit
+			}
+
+			evictorFilter := evictions.NewEvictorFilter(
 				testCase.nodes,
 				getPodsAssignedToNode,
 				false,
 				false,
 				false,
 				false,
-				false,
+				evictions.WithNodeFit(nodeFit),
 			)
 
-			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
+			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != testCase.expectedEvictedPodCount {
 				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
@@ -748,15 +758,19 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				nil,
 				nil,
 				testCase.nodes,
-				getPodsAssignedToNode,
 				false,
+			)
+
+			evictorFilter := evictions.NewEvictorFilter(
+				testCase.nodes,
+				getPodsAssignedToNode,
 				false,
 				false,
 				false,
 				false,
 			)
 
-			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
+			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != testCase.expectedEvictedPodCount {
 				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
diff --git a/pkg/descheduler/strategies/failedpods.go b/pkg/descheduler/strategies/failedpods.go
index b2942509b..b9bfabaea 100644
--- a/pkg/descheduler/strategies/failedpods.go
+++ b/pkg/descheduler/strategies/failedpods.go
@@ -33,6 +33,7 @@ func RemoveFailedPods(
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	evictorFilter *evictions.EvictorFilter,
 	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	strategyParams, err := validateAndParseRemoveFailedPodsParams(ctx, client, strategy.Params)
@@ -41,19 +42,13 @@ func RemoveFailedPods(
 		return
 	}
 
-	evictable := podEvictor.Evictable(
-		evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
-		evictions.WithNodeFit(strategyParams.NodeFit),
-		evictions.WithLabelSelector(strategyParams.LabelSelector),
-	)
-
 	var labelSelector *metav1.LabelSelector
 	if strategy.Params != nil {
 		labelSelector = strategy.Params.LabelSelector
 	}
 
 	podFilter, err := podutil.NewOptions().
-		WithFilter(evictable.IsEvictable).
+		WithFilter(evictorFilter.Filter).
 		WithNamespaces(strategyParams.IncludedNamespaces).
 		WithoutNamespaces(strategyParams.ExcludedNamespaces).
 		WithLabelSelector(labelSelector).
diff --git a/pkg/descheduler/strategies/failedpods_test.go b/pkg/descheduler/strategies/failedpods_test.go
index 92b3fdf73..6fdeb9497 100644
--- a/pkg/descheduler/strategies/failedpods_test.go
+++ b/pkg/descheduler/strategies/failedpods_test.go
@@ -46,7 +46,7 @@ func TestRemoveFailedPods(t *testing.T) {
 	}{
 		{
 			description:             "default empty strategy, 0 failures, 0 evictions",
-			strategy:                api.DeschedulerStrategy{},
+			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: false}},
 			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
 			expectedEvictedPodCount: 0,
 			pods:                    []*v1.Pod{}, // no pods come back with field selector phase=Failed
@@ -274,16 +274,21 @@ func TestRemoveFailedPods(t *testing.T) {
 				false,
 				nil,
 				nil,
+				tc.nodes,
+				false,
+			)
+
+			evictorFilter := evictions.NewEvictorFilter(
 				tc.nodes,
 				getPodsAssignedToNode,
 				false,
 				false,
 				false,
 				false,
-				false,
+				evictions.WithNodeFit(tc.strategy.Params.NodeFit),
 			)
 
-			RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+			RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != tc.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
diff --git a/pkg/descheduler/strategies/node_affinity.go b/pkg/descheduler/strategies/node_affinity.go
index 832a1b6d4..c3c21fec9 100644
--- a/pkg/descheduler/strategies/node_affinity.go
+++ b/pkg/descheduler/strategies/node_affinity.go
@@ -29,7 +29,6 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
@@ -48,16 +47,11 @@ func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) err
 }
 
 // RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
-func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
 		return
 	}
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
 
 	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
@@ -65,13 +59,6 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	podFilter, err := podutil.NewOptions().
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
@@ -94,7 +81,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 					node.Name,
 					getPodsAssignedToNode,
 					podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
-						return evictable.IsEvictable(pod) &&
+						return evictorFilter.Filter(pod) &&
 							!nodeutil.PodFitsCurrentNode(getPodsAssignedToNode, pod, node) &&
 							nodeutil.PodFitsAnyNode(getPodsAssignedToNode, pod, nodes)
 					}),
diff --git a/pkg/descheduler/strategies/node_affinity_test.go b/pkg/descheduler/strategies/node_affinity_test.go
index 53f45d67c..37e65e0c8 100644
--- a/pkg/descheduler/strategies/node_affinity_test.go
+++ b/pkg/descheduler/strategies/node_affinity_test.go
@@ -221,16 +221,26 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 				false,
 				tc.maxPodsToEvictPerNode,
 				tc.maxNoOfPodsToEvictPerNamespace,
+				tc.nodes,
+				false,
+			)
+
+			nodeFit := false
+			if tc.strategy.Params != nil {
+				nodeFit = tc.strategy.Params.NodeFit
+			}
+
+			evictorFilter := evictions.NewEvictorFilter(
 				tc.nodes,
 				getPodsAssignedToNode,
 				false,
 				false,
 				false,
 				false,
-				false,
+				evictions.WithNodeFit(nodeFit),
 			)
 
-			RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+			RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != tc.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
diff --git a/pkg/descheduler/strategies/node_taint.go b/pkg/descheduler/strategies/node_taint.go
index 64cf8d48c..094ded915 100644
--- a/pkg/descheduler/strategies/node_taint.go
+++ b/pkg/descheduler/strategies/node_taint.go
@@ -49,7 +49,7 @@ func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters)
 }
 
 // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
-func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
 		return
@@ -68,21 +68,8 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 		labelSelector = strategy.Params.LabelSelector
 	}
 
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	podFilter, err := podutil.NewOptions().
-		WithFilter(evictable.IsEvictable).
+		WithFilter(evictorFilter.Filter).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		WithLabelSelector(labelSelector).
diff --git a/pkg/descheduler/strategies/node_taint_test.go b/pkg/descheduler/strategies/node_taint_test.go
index ae2d06d2e..7263dd8e2 100644
--- a/pkg/descheduler/strategies/node_taint_test.go
+++ b/pkg/descheduler/strategies/node_taint_test.go
@@ -338,11 +338,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 				tc.maxPodsToEvictPerNode,
 				tc.maxNoOfPodsToEvictPerNamespace,
 				tc.nodes,
-				getPodsAssignedToNode,
-				tc.evictLocalStoragePods,
-				tc.evictSystemCriticalPods,
-				false,
-				false,
 				false,
 			)
 
@@ -354,7 +349,17 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 				},
 			}
 
-			RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+			evictorFilter := evictions.NewEvictorFilter(
+				tc.nodes,
+				getPodsAssignedToNode,
+				tc.evictLocalStoragePods,
+				tc.evictSystemCriticalPods,
+				false,
+				false,
+				evictions.WithNodeFit(tc.nodeFit),
+			)
+
+			RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != tc.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
diff --git a/pkg/descheduler/strategies/nodeutilization/highnodeutilization.go b/pkg/descheduler/strategies/nodeutilization/highnodeutilization.go
index 6664fa196..13c0ad7ab 100644
--- a/pkg/descheduler/strategies/nodeutilization/highnodeutilization.go
+++ b/pkg/descheduler/strategies/nodeutilization/highnodeutilization.go
@@ -29,28 +29,16 @@ import (
 
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 // HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its strategy.
 // Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
-func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
 		return
 	}
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-
 
 	thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
 	targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
 	if err := validateHighUtilizationStrategyConfig(thresholds, targetThresholds); err != nil {
@@ -108,8 +96,6 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
 		return
 	}
 
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	// stop if the total available usage has dropped to zero - no more pods can be scheduled
 	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
 		for name := range totalAvailableUsage {
@@ -129,7 +115,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
 		sourceNodes,
 		highNodes,
 		podEvictor,
-		evictable.IsEvictable,
+		evictorFilter.Filter,
 		resourceNames,
 		"HighNodeUtilization",
 		continueEvictionCond)
diff --git a/pkg/descheduler/strategies/nodeutilization/highnodeutilization_test.go b/pkg/descheduler/strategies/nodeutilization/highnodeutilization_test.go
index b36fb5e29..f2e391b74 100644
--- a/pkg/descheduler/strategies/nodeutilization/highnodeutilization_test.go
+++ b/pkg/descheduler/strategies/nodeutilization/highnodeutilization_test.go
@@ -507,11 +507,6 @@ func TestHighNodeUtilization(t *testing.T) {
 				nil,
 				nil,
 				testCase.nodes,
-				getPodsAssignedToNode,
-				false,
-				false,
-				false,
-				false,
 				false,
 			)
 
@@ -524,7 +519,18 @@ func TestHighNodeUtilization(t *testing.T) {
 					NodeFit: true,
 				},
 			}
-			HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
+
+			evictorFilter := evictions.NewEvictorFilter(
+				testCase.nodes,
+				getPodsAssignedToNode,
+				false,
+				false,
+				false,
+				false,
+				evictions.WithNodeFit(strategy.Params.NodeFit),
+			)
+
+			HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 
 			podsEvicted := podEvictor.TotalEvicted()
 			if testCase.expectedPodsEvicted != podsEvicted {
@@ -713,15 +719,19 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 				&item.evictionsExpected,
 				nil,
 				item.nodes,
-				getPodsAssignedToNode,
 				false,
+			)
+
+			evictorFilter := evictions.NewEvictorFilter(
+				item.nodes,
+				getPodsAssignedToNode,
 				false,
 				false,
 				false,
 				false,
 			)
 
-			HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)
+			HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 
 			if item.evictionsExpected != podEvictor.TotalEvicted() {
 				t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
diff --git a/pkg/descheduler/strategies/nodeutilization/lownodeutilization.go b/pkg/descheduler/strategies/nodeutilization/lownodeutilization.go
index 283ee3b8d..15985ad0b 100644
--- a/pkg/descheduler/strategies/nodeutilization/lownodeutilization.go
+++ b/pkg/descheduler/strategies/nodeutilization/lownodeutilization.go
@@ -29,27 +29,17 @@ import (
 
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 // LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
 // to calculate nodes' utilization and not the actual resource usage.
-func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
 	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
 		return
 	}
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
 	useDeviationThresholds := strategy.Params.NodeResourceUtilizationThresholds.UseDeviationThresholds
 	thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
 	targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
@@ -152,8 +142,6 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 		return
 	}
 
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
 	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
 		if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
@@ -176,7 +164,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 		sourceNodes,
 		lowNodes,
 		podEvictor,
-		evictable.IsEvictable,
+		evictorFilter.Filter,
 		resourceNames,
 		"LowNodeUtilization",
 		continueEvictionCond)
diff --git a/pkg/descheduler/strategies/nodeutilization/lownodeutilization_test.go b/pkg/descheduler/strategies/nodeutilization/lownodeutilization_test.go
index 3196825f6..910835814 100644
--- a/pkg/descheduler/strategies/nodeutilization/lownodeutilization_test.go
+++ b/pkg/descheduler/strategies/nodeutilization/lownodeutilization_test.go
@@ -772,11 +772,6 @@ func TestLowNodeUtilization(t *testing.T) {
 				nil,
 				nil,
 				test.nodes,
-				getPodsAssignedToNode,
-				false,
-				false,
-				false,
-				false,
 				false,
 			)
 
@@ -791,7 +786,18 @@ func TestLowNodeUtilization(t *testing.T) {
 					NodeFit: true,
 				},
 			}
-			LowNodeUtilization(ctx, fakeClient, strategy, test.nodes, podEvictor, getPodsAssignedToNode)
+
+			evictorFilter := evictions.NewEvictorFilter(
+				test.nodes,
+				getPodsAssignedToNode,
+				false,
+				false,
+				false,
+				false,
+				evictions.WithNodeFit(strategy.Params.NodeFit),
+			)
+
+			LowNodeUtilization(ctx, fakeClient, strategy, test.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 
 			podsEvicted := podEvictor.TotalEvicted()
 			if test.expectedPodsEvicted != podsEvicted {
@@ -1087,15 +1093,19 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 				&item.evictionsExpected,
 				nil,
 				item.nodes,
-				getPodsAssignedToNode,
 				false,
+			)
+
+			evictorFilter := evictions.NewEvictorFilter(
+				item.nodes,
+				getPodsAssignedToNode,
 				false,
 				false,
 				false,
 				false,
 			)
 
-			LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)
+			LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 
 			if item.evictionsExpected != podEvictor.TotalEvicted() {
 				t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
diff --git a/pkg/descheduler/strategies/pod_antiaffinity.go b/pkg/descheduler/strategies/pod_antiaffinity.go
index a5c71cd74..ded994f97 100644
--- a/pkg/descheduler/strategies/pod_antiaffinity.go
+++ b/pkg/descheduler/strategies/pod_antiaffinity.go
@@ -49,7 +49,7 @@ func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyP
 }
 
 // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
-func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters")
 		return
@@ -65,19 +65,6 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 		labelSelector = strategy.Params.LabelSelector
 	}
 
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	podFilter, err := podutil.NewOptions().
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
@@ -98,7 +85,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 		podutil.SortPodsBasedOnPriorityLowToHigh(pods)
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
-			if checkPodsWithAntiAffinityExist(pods[i], pods) && evictable.IsEvictable(pods[i]) {
+			if checkPodsWithAntiAffinityExist(pods[i], pods) && evictorFilter.Filter(pods[i]) {
 				success, err := podEvictor.EvictPod(ctx, pods[i], node, "InterPodAntiAffinity")
 				if err != nil {
 					klog.ErrorS(err, "Error evicting pod")
diff --git a/pkg/descheduler/strategies/pod_antiaffinity_test.go b/pkg/descheduler/strategies/pod_antiaffinity_test.go
index ff8fc429f..7a00ae615 100644
--- a/pkg/descheduler/strategies/pod_antiaffinity_test.go
+++ b/pkg/descheduler/strategies/pod_antiaffinity_test.go
@@ -218,11 +218,6 @@ func TestPodAntiAffinity(t *testing.T) {
 				test.maxPodsToEvictPerNode,
 				test.maxNoOfPodsToEvictPerNamespace,
 				test.nodes,
-				getPodsAssignedToNode,
-				false,
-				false,
-				false,
-				false,
 				false,
 			)
 			strategy := api.DeschedulerStrategy{
@@ -231,7 +226,17 @@ func TestPodAntiAffinity(t *testing.T) {
 				},
 			}
 
-			RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor, getPodsAssignedToNode)
+			evictorFilter := evictions.NewEvictorFilter(
+				test.nodes,
+				getPodsAssignedToNode,
+				false,
+				false,
+				false,
+				false,
+				evictions.WithNodeFit(test.nodeFit),
+			)
+
+			RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != test.expectedEvictedPodCount {
 				t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
diff --git a/pkg/descheduler/strategies/pod_lifetime.go b/pkg/descheduler/strategies/pod_lifetime.go
index fa5e91f47..93cd28b93 100644
--- a/pkg/descheduler/strategies/pod_lifetime.go
+++ b/pkg/descheduler/strategies/pod_lifetime.go
@@ -29,7 +29,6 @@ import (
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 func validatePodLifeTimeParams(params *api.StrategyParameters) error {
@@ -57,32 +56,24 @@ func validatePodLifeTimeParams(params *api.StrategyParameters) error {
 }
 
 // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
-func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validatePodLifeTimeParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid PodLifeTime parameters")
 		return
 	}
 
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-
 	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
 		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
 		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
-
-	filter := evictable.IsEvictable
+	filter := evictorFilter.Filter
 	if strategy.Params.PodLifeTime.PodStatusPhases != nil {
 		filter = func(pod *v1.Pod) bool {
 			for _, phase := range strategy.Params.PodLifeTime.PodStatusPhases {
 				if string(pod.Status.Phase) == phase {
-					return evictable.IsEvictable(pod)
+					return evictorFilter.Filter(pod)
 				}
 			}
 			return false
diff --git a/pkg/descheduler/strategies/pod_lifetime_test.go b/pkg/descheduler/strategies/pod_lifetime_test.go
index 4b7f21a9a..b3998b919 100644
--- a/pkg/descheduler/strategies/pod_lifetime_test.go
+++ b/pkg/descheduler/strategies/pod_lifetime_test.go
@@ -339,16 +339,20 @@ func TestPodLifeTime(t *testing.T) {
 				false,
 				tc.maxPodsToEvictPerNode,
 				tc.maxPodsToEvictPerNamespace,
+				tc.nodes,
+				false,
+			)
+
+			evictorFilter := evictions.NewEvictorFilter(
 				tc.nodes,
 				getPodsAssignedToNode,
 				false,
 				false,
 				tc.ignorePvcPods,
 				false,
-				false,
 			)
 
-			PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+			PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != tc.expectedEvictedPodCount {
 				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
diff --git a/pkg/descheduler/strategies/toomanyrestarts.go b/pkg/descheduler/strategies/toomanyrestarts.go
index 629122f07..7e5431e01 100644
--- a/pkg/descheduler/strategies/toomanyrestarts.go
+++ b/pkg/descheduler/strategies/toomanyrestarts.go
@@ -28,7 +28,6 @@ import (
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
@@ -50,33 +49,20 @@ func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameter
 // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
 // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
 		return
 	}
 
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-
 	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
 		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
 		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	podFilter, err := podutil.NewOptions().
-		WithFilter(evictable.IsEvictable).
+		WithFilter(evictorFilter.Filter).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		WithLabelSelector(strategy.Params.LabelSelector).
diff --git a/pkg/descheduler/strategies/toomanyrestarts_test.go b/pkg/descheduler/strategies/toomanyrestarts_test.go
index 8a1ba94f3..921731b2d 100644
--- a/pkg/descheduler/strategies/toomanyrestarts_test.go
+++ b/pkg/descheduler/strategies/toomanyrestarts_test.go
@@ -249,16 +249,21 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 				false,
 				tc.maxPodsToEvictPerNode,
 				tc.maxNoOfPodsToEvictPerNamespace,
+				tc.nodes,
+				false,
+			)
+
+			evictorFilter := evictions.NewEvictorFilter(
 				tc.nodes,
 				getPodsAssignedToNode,
 				false,
 				false,
 				false,
 				false,
-				false,
+				evictions.WithNodeFit(tc.strategy.Params.NodeFit),
 			)
 
-			RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+			RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != tc.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
diff --git a/pkg/descheduler/strategies/topologyspreadconstraint.go b/pkg/descheduler/strategies/topologyspreadconstraint.go
index 035896bbc..def1a5347 100644
--- a/pkg/descheduler/strategies/topologyspreadconstraint.go
+++ b/pkg/descheduler/strategies/topologyspreadconstraint.go
@@ -46,12 +46,14 @@ type topology struct {
 	pods []*v1.Pod
 }
 
+// nolint: gocyclo
 func RemovePodsViolatingTopologySpreadConstraint(
 	ctx context.Context,
 	client clientset.Interface,
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	evictorFilter *evictions.EvictorFilter,
 	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
@@ -60,11 +62,13 @@ func RemovePodsViolatingTopologySpreadConstraint(
 		return
 	}
 
-	evictable := podEvictor.Evictable(
-		evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
-		evictions.WithNodeFit(strategyParams.NodeFit),
-		evictions.WithLabelSelector(strategyParams.LabelSelector),
-	)
+	isEvictable := evictorFilter.Filter
+
+	if strategyParams.LabelSelector != nil && !strategyParams.LabelSelector.Empty() {
+		isEvictable = podutil.WrapFilterFuncs(isEvictable, func(pod *v1.Pod) bool {
+			return strategyParams.LabelSelector.Matches(labels.Set(pod.Labels))
+		})
+	}
 
 	nodeMap := make(map[string]*v1.Node, len(nodes))
 	for _, node := range nodes {
@@ -168,12 +172,12 @@ func RemovePodsViolatingTopologySpreadConstraint(
 				klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
 				continue
 			}
-			balanceDomains(client, getPodsAssignedToNode, podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodes)
+			balanceDomains(client, getPodsAssignedToNode, podsForEviction, constraint, constraintTopologies, sumPods, evictorFilter.Filter, nodes)
 		}
 	}
 
 	for pod := range podsForEviction {
-		if !evictable.IsEvictable(pod) {
+		if !isEvictable(pod) {
 			continue
 		}
 		if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodTopologySpread"); err != nil {
@@ -234,6 +238,7 @@ func balanceDomains(
 
 	idealAvg := sumPods / float64(len(constraintTopologies))
 	sortedDomains := sortDomains(constraintTopologies, isEvictable)
 
+	// i is the index for belowOrEqualAvg
 	// j is the index for aboveAvg
 	i := 0
diff --git a/pkg/descheduler/strategies/topologyspreadconstraint_test.go b/pkg/descheduler/strategies/topologyspreadconstraint_test.go
index f2b8ac8fa..fb6276031 100644
--- a/pkg/descheduler/strategies/topologyspreadconstraint_test.go
+++ b/pkg/descheduler/strategies/topologyspreadconstraint_test.go
@@ -970,15 +970,26 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				false,
 				nil,
 				nil,
+				tc.nodes,
+				false,
+			)
+
+			nodeFit := false
+			if tc.strategy.Params != nil {
+				nodeFit = tc.strategy.Params.NodeFit
+			}
+
+			evictorFilter := evictions.NewEvictorFilter(
 				tc.nodes,
 				getPodsAssignedToNode,
 				false,
 				false,
 				false,
 				false,
-				false,
+				evictions.WithNodeFit(nodeFit),
 			)
-			RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+
+			RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != tc.expectedEvictedCount {
 				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
diff --git a/test/e2e/e2e_duplicatepods_test.go b/test/e2e/e2e_duplicatepods_test.go
index 4fb8e7870..9a1bbc494 100644
--- a/test/e2e/e2e_duplicatepods_test.go
+++ b/test/e2e/e2e_duplicatepods_test.go
@@ -145,11 +145,6 @@ func TestRemoveDuplicates(t *testing.T) {
 		nil,
 		nil,
 		nodes,
-		getPodsAssignedToNode,
-		true,
-		false,
-		false,
-		false,
 		false,
 	)
 
@@ -165,6 +160,14 @@ func TestRemoveDuplicates(t *testing.T) {
 				},
 				workerNodes,
 				podEvictor,
+				evictions.NewEvictorFilter(
+					nodes,
+					getPodsAssignedToNode,
+					true,
+					false,
+					false,
+					false,
+				),
 				getPodsAssignedToNode,
 			)
 
diff --git a/test/e2e/e2e_failedpods_test.go b/test/e2e/e2e_failedpods_test.go
index ae5efae41..3873ee97d 100644
--- a/test/e2e/e2e_failedpods_test.go
+++ b/test/e2e/e2e_failedpods_test.go
@@ -15,6 +15,7 @@ import (
 	"k8s.io/utils/pointer"
 
 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
+	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
 )
 
@@ -96,6 +97,14 @@ func TestFailedPods(t *testing.T) {
 			},
 			nodes,
 			podEvictor,
+			evictions.NewEvictorFilter(
+				nodes,
+				getPodsAssignedToNode,
+				true,
+				false,
+				false,
+				false,
+			),
 			getPodsAssignedToNode,
 		)
 		t.Logf("Finished RemoveFailedPods strategy for %s", name)
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 390e59b97..8ccac576a 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -177,19 +177,26 @@ func runPodLifetimeStrategy(
 	}
 
 	maxPodLifeTimeSeconds := uint(1)
+	strategy := deschedulerapi.DeschedulerStrategy{
+		Enabled: true,
+		Params: &deschedulerapi.StrategyParameters{
+			PodLifeTime:                &deschedulerapi.PodLifeTime{MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds},
+			Namespaces:                 namespaces,
+			ThresholdPriority:          priority,
+			ThresholdPriorityClassName: priorityClass,
+			LabelSelector:              labelSelector,
+		},
+	}
+
+	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, clientset, strategy.Params)
+	if err != nil {
+		t.Fatalf("Failed to get threshold priority from strategy's params")
+	}
+
 	strategies.PodLifeTime(
 		ctx,
 		clientset,
-		deschedulerapi.DeschedulerStrategy{
-			Enabled: true,
-			Params: &deschedulerapi.StrategyParameters{
-				PodLifeTime:                &deschedulerapi.PodLifeTime{MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds},
-				Namespaces:                 namespaces,
-				ThresholdPriority:          priority,
-				ThresholdPriorityClassName: priorityClass,
-				LabelSelector:              labelSelector,
-			},
-		},
+		strategy,
 		nodes,
 		evictions.NewPodEvictor(
 			clientset,
@@ -197,13 +204,17 @@
 			false,
 			nil,
 			maxPodsToEvictPerNamespace,
+			nodes,
+			false,
+		),
+		evictions.NewEvictorFilter(
 			nodes,
 			getPodsAssignedToNode,
 			false,
 			evictCritical,
 			false,
 			false,
-			false,
+			evictions.WithPriorityThreshold(thresholdPriority),
 		),
 		getPodsAssignedToNode,
 	)
@@ -326,7 +337,16 @@ func TestLowNodeUtilization(t *testing.T) {
 	// Run LowNodeUtilization strategy
 	podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
 
-	podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
+	evictorFilter := evictions.NewEvictorFilter(
+		nodes,
+		getPodsAssignedToNode,
+		true,
+		false,
+		false,
+		false,
+	)
+
+	podFilter, err := podutil.NewOptions().WithFilter(evictorFilter.Filter).BuildFilterFunc()
 	if err != nil {
 		t.Errorf("Error initializing pod filter function, %v", err)
 	}
@@ -356,12 +376,13 @@ func TestLowNodeUtilization(t *testing.T) {
 		},
 		workerNodes,
 		podEvictor,
+		evictorFilter,
 		getPodsAssignedToNode,
 	)
 
 	waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
 
-	podFilter, err = podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
+	podFilter, err = podutil.NewOptions().WithFilter(evictorFilter.Filter).BuildFilterFunc()
 	if err != nil {
 		t.Errorf("Error initializing pod filter function, %v", err)
 	}
@@ -1410,11 +1431,6 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAs
 		nil,
 		nil,
 		nodes,
-		getPodsAssignedToNode,
-		true,
-		false,
-		false,
-		false,
 		false,
 	)
 }
diff --git a/test/e2e/e2e_toomanyrestarts_test.go b/test/e2e/e2e_toomanyrestarts_test.go
index 1aa3d22e0..eb4ca15b7 100644
--- a/test/e2e/e2e_toomanyrestarts_test.go
+++ b/test/e2e/e2e_toomanyrestarts_test.go
@@ -138,13 +138,9 @@ func TestTooManyRestarts(t *testing.T) {
 		nil,
 		nil,
 		nodes,
-		getPodsAssignedToNode,
-		true,
-		false,
-		false,
-		false,
 		false,
 	)
+
 	// Run RemovePodsHavingTooManyRestarts strategy
 	t.Log("Running RemovePodsHavingTooManyRestarts strategy")
 	strategies.RemovePodsHavingTooManyRestarts(
@@ -161,6 +157,14 @@ func TestTooManyRestarts(t *testing.T) {
 		},
 		workerNodes,
 		podEvictor,
+		evictions.NewEvictorFilter(
+			nodes,
+			getPodsAssignedToNode,
+			true,
+			false,
+			false,
+			false,
+		),
 		getPodsAssignedToNode,
 	)
 
diff --git a/test/e2e/e2e_topologyspreadconstraint_test.go b/test/e2e/e2e_topologyspreadconstraint_test.go
index 2766eaafa..dc2759868 100644
--- a/test/e2e/e2e_topologyspreadconstraint_test.go
+++ b/test/e2e/e2e_topologyspreadconstraint_test.go
@@ -11,6 +11,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
+	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
 )
 
@@ -92,6 +93,14 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			},
 			nodes,
 			podEvictor,
+			evictions.NewEvictorFilter(
+				nodes,
+				getPodsAssignedToNode,
+				true,
+				false,
+				false,
+				false,
+			),
 			getPodsAssignedToNode,
 		)
 		t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
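
Usage sketch (not part of the patch): after this refactor, eviction responsibility is split between two collaborators — PodEvictor keeps the eviction mechanics (policy group version, dry-run, per-node and per-namespace limits), while the new EvictorFilter owns the per-pod evictability decision that previously lived behind PodEvictor.Evictable(). The wiring below is a minimal sketch based on the signatures in the diff above; the helper name runRemoveFailedPods and the concrete argument values (dry-run off, no eviction limits, nodeFit enabled) are invented for illustration.

```go
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	clientset "k8s.io/client-go/kubernetes"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)

// runRemoveFailedPods is a hypothetical helper showing the post-refactor wiring:
// the evictor acts, the filter decides.
func runRemoveFailedPods(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
	// PodEvictor no longer takes the node indexer or the evictability booleans.
	podEvictor := evictions.NewPodEvictor(
		client,
		policyv1.SchemeGroupVersion.String(), // policyGroupVersion
		false,                                // dryRun
		nil,                                  // maxPodsToEvictPerNode: unlimited
		nil,                                  // maxPodsToEvictPerNamespace: unlimited
		nodes,
		true, // metricsEnabled
	)

	// EvictorFilter now receives those arguments instead, plus the same
	// functional options Evictable() used to accept.
	evictorFilter := evictions.NewEvictorFilter(
		nodes,
		getPodsAssignedToNode,
		false, // evictLocalStoragePods
		false, // evictSystemCriticalPods
		false, // ignorePvcPods
		false, // evictFailedBarePods
		evictions.WithNodeFit(true), // skip pods that fit on no other node
	)

	// Every strategy signature gained the evictorFilter parameter.
	strategies.RemoveFailedPods(ctx, client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
}
```

Building the filter once in RunDeschedulerStrategies is what lets the nodeFit and threshold-priority plumbing disappear from each individual strategy, as the deletions above show.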