From 0ff8ecb41ecf90ff2cfaa84a3e763ad0c800cb6d Mon Sep 17 00:00:00 2001
From: Garrybest
Date: Fri, 10 Dec 2021 19:28:51 +0800
Subject: [PATCH] reform all strategies by using getPodsAssignedToNode

Signed-off-by: Garrybest
---
 pkg/descheduler/descheduler.go                | 11 ++++-
 pkg/descheduler/strategies/duplicates.go      | 25 ++++++-----
 pkg/descheduler/strategies/failedpods.go      | 30 ++++++++------
 pkg/descheduler/strategies/node_affinity.go   | 29 ++++++++-----
 pkg/descheduler/strategies/node_taint.go      | 30 ++++++++------
 .../nodeutilization/highnodeutilization.go    |  6 ++-
 .../nodeutilization/lownodeutilization.go     |  5 ++-
 .../nodeutilization/nodeutilization.go        | 10 ++---
 .../strategies/pod_antiaffinity.go            | 28 +++++++------
 pkg/descheduler/strategies/pod_lifetime.go    | 41 ++++++++++---------
 pkg/descheduler/strategies/toomanyrestarts.go | 30 ++++++++------
 .../strategies/topologyspreadconstraint.go    |  5 ++-
 12 files changed, 145 insertions(+), 105 deletions(-)

diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go
index a773e26f2..b5b3cd283 100644
--- a/pkg/descheduler/descheduler.go
+++ b/pkg/descheduler/descheduler.go
@@ -33,6 +33,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
 )
@@ -64,11 +65,17 @@ func Run(rs *options.DeschedulerServer) error {
 	return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
 }
 
-type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor)
+type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
 
 func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
 	sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
 	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
+	podInformer := sharedInformerFactory.Core().V1().Pods()
+
+	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+	if err != nil {
+		return fmt.Errorf("build get pods assigned to node function error: %v", err)
+	}
 
 	sharedInformerFactory.Start(stopChannel)
 	sharedInformerFactory.WaitForCacheSync(stopChannel)
@@ -138,7 +145,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 	for name, strategy := range deschedulerPolicy.Strategies {
 		if f, ok := strategyFuncs[name]; ok {
 			if strategy.Enabled {
-				f(ctx, rs.Client, strategy, nodes, podEvictor)
+				f(ctx, rs.Client, strategy, nodes, podEvictor, getPodsAssignedToNode)
 			}
 		} else {
 			klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
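
A note on the helper this hunk introduces: podutil.BuildGetPodsAssignedToNodeFunc is called before sharedInformerFactory.Start, which matters because indexers generally cannot be added to an informer that is already running. The sketch below shows the shape such an informer-backed, node-indexed lookup can take; the index name and the error handling here are illustrative assumptions, not the descheduler's exact implementation.

package podindex

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	coreinformers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/tools/cache"
)

// GetPodsAssignedToNodeFunc returns the pods bound to a node, optionally filtered.
type GetPodsAssignedToNodeFunc func(nodeName string, filter func(*v1.Pod) bool) ([]*v1.Pod, error)

// BuildGetPodsAssignedToNodeFunc registers a nodeName index on the pod
// informer so that strategies can read pods from the local cache instead of
// issuing one apiserver List per node per descheduling run.
func BuildGetPodsAssignedToNodeFunc(podInformer coreinformers.PodInformer) (GetPodsAssignedToNodeFunc, error) {
	err := podInformer.Informer().AddIndexers(cache.Indexers{
		"nodeName": func(obj interface{}) ([]string, error) {
			pod, ok := obj.(*v1.Pod)
			if !ok {
				return nil, nil
			}
			// Index each pod under the node it is assigned to.
			return []string{pod.Spec.NodeName}, nil
		},
	})
	if err != nil {
		return nil, err
	}
	indexer := podInformer.Informer().GetIndexer()
	return func(nodeName string, filter func(*v1.Pod) bool) ([]*v1.Pod, error) {
		objs, err := indexer.ByIndex("nodeName", nodeName)
		if err != nil {
			return nil, err
		}
		pods := make([]*v1.Pod, 0, len(objs))
		for _, obj := range objs {
			pod, ok := obj.(*v1.Pod)
			if !ok {
				return nil, fmt.Errorf("unexpected object in pod cache")
			}
			if filter == nil || filter(pod) {
				pods = append(pods, pod)
			}
		}
		return pods, nil
	}, nil
}

Once the factory is started and the cache has synced, every ByIndex call is served locally, which is the point of threading getPodsAssignedToNode through every strategy signature below.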
diff --git a/pkg/descheduler/strategies/duplicates.go b/pkg/descheduler/strategies/duplicates.go
index fe6169b0a..462fb36f6 100644
--- a/pkg/descheduler/strategies/duplicates.go
+++ b/pkg/descheduler/strategies/duplicates.go
@@ -66,6 +66,7 @@ func RemoveDuplicatePods(
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
@@ -77,10 +78,10 @@ func RemoveDuplicatePods(
 		return
 	}
 
-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params != nil && strategy.Params.Namespaces != nil {
-		includedNamespaces = strategy.Params.Namespaces.Include
-		excludedNamespaces = strategy.Params.Namespaces.Exclude
+		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
 	nodeFit := false
@@ -95,15 +96,19 @@ func RemoveDuplicatePods(
 	nodeCount := 0
 	nodeMap := make(map[string]*v1.Node)
 
+	podFilter, err := podutil.NewOptions().
+		WithFilter(evictable.IsEvictable).
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		pods, err := podutil.ListPodsOnANode(ctx,
-			client,
-			node,
-			podutil.WithFilter(evictable.IsEvictable),
-			podutil.WithNamespaces(includedNamespaces),
-			podutil.WithoutNamespaces(excludedNamespaces),
-		)
+		pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
 		if err != nil {
 			klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
 			continue
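
The variadic podutil.With* options that each listing call used to take are replaced by a builder that is evaluated once per strategy, before the node loop. A condensed sketch of the pattern (method names mirror the patch; the bodies are assumed for illustration):

package podfilter

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
)

// FilterFunc decides whether a listed pod is kept.
type FilterFunc func(pod *v1.Pod) bool

// Options accumulates criteria; BuildFilterFunc compiles them exactly once.
type Options struct {
	filter             FilterFunc
	includedNamespaces sets.String
	excludedNamespaces sets.String
	labelSelector      *metav1.LabelSelector
}

func NewOptions() *Options { return &Options{} }

func (o *Options) WithFilter(f FilterFunc) *Options          { o.filter = f; return o }
func (o *Options) WithNamespaces(ns sets.String) *Options    { o.includedNamespaces = ns; return o }
func (o *Options) WithoutNamespaces(ns sets.String) *Options { o.excludedNamespaces = ns; return o }
func (o *Options) WithLabelSelector(s *metav1.LabelSelector) *Options { o.labelSelector = s; return o }

// BuildFilterFunc parses the label selector up front (the only step that can
// fail) and returns one composite predicate over namespaces, labels and the
// caller-supplied filter.
func (o *Options) BuildFilterFunc() (FilterFunc, error) {
	var selector labels.Selector
	if o.labelSelector != nil {
		s, err := metav1.LabelSelectorAsSelector(o.labelSelector)
		if err != nil {
			return nil, err
		}
		selector = s
	}
	return func(pod *v1.Pod) bool {
		if o.includedNamespaces.Len() > 0 && !o.includedNamespaces.Has(pod.Namespace) {
			return false
		}
		if o.excludedNamespaces.Has(pod.Namespace) {
			return false
		}
		if selector != nil && !selector.Matches(labels.Set(pod.Labels)) {
			return false
		}
		return o.filter == nil || o.filter(pod)
	}, nil
}

This is also why the namespace lists become sets.String rather than []string throughout the patch: the compiled predicate runs once per cached pod, and set membership keeps that per-pod cost constant.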
diff --git a/pkg/descheduler/strategies/failedpods.go b/pkg/descheduler/strategies/failedpods.go
index 74d79956c..10eeae3d6 100644
--- a/pkg/descheduler/strategies/failedpods.go
+++ b/pkg/descheduler/strategies/failedpods.go
@@ -3,11 +3,11 @@ package strategies
 import (
 	"context"
 	"fmt"
-	"k8s.io/apimachinery/pkg/util/sets"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 
@@ -33,6 +33,7 @@ func RemoveFailedPods(
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	strategyParams, err := validateAndParseRemoveFailedPodsParams(ctx, client, strategy.Params)
 	if err != nil {
@@ -51,20 +52,23 @@ func RemoveFailedPods(
 		labelSelector = strategy.Params.LabelSelector
 	}
 
+	podFilter, err := podutil.NewOptions().
+		WithFilter(evictable.IsEvictable).
+		WithNamespaces(strategyParams.IncludedNamespaces).
+		WithoutNamespaces(strategyParams.ExcludedNamespaces).
+		WithLabelSelector(labelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+	// Only list failed pods
+	phaseFilter := func(pod *v1.Pod) bool { return pod.Status.Phase == v1.PodFailed }
+	podFilter = podutil.WrapFilterFuncs(phaseFilter, podFilter)
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase=" + string(v1.PodFailed)
-
-		pods, err := podutil.ListPodsOnANodeWithFieldSelector(
-			ctx,
-			client,
-			node,
-			fieldSelectorString,
-			podutil.WithFilter(evictable.IsEvictable),
-			podutil.WithNamespaces(strategyParams.IncludedNamespaces.UnsortedList()),
-			podutil.WithoutNamespaces(strategyParams.ExcludedNamespaces.UnsortedList()),
-			podutil.WithLabelSelector(labelSelector),
-		)
+		pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
 		if err != nil {
 			klog.ErrorS(err, "Error listing a nodes failed pods", "node", klog.KObj(node))
 			continue
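
RemoveFailedPods previously asked the apiserver to pre-filter with the field selector spec.nodeName=...,status.phase=Failed. A cache lookup has no server-side filtering, so the phase check becomes one more local predicate composed onto the compiled filter. podutil.WrapFilterFuncs presumably does no more than AND predicates together, along these lines (continuing the illustrative podfilter sketch above):

// WrapFilterFuncs combines several predicates into one that requires all of them.
func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
	return func(pod *v1.Pod) bool {
		for _, f := range filters {
			if f != nil && !f(pod) {
				return false
			}
		}
		return true
	}
}

Composed as WrapFilterFuncs(phaseFilter, podFilter), the cheap phase test runs first and short-circuits the rest, reproducing the old field-selector semantics against the cache.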
diff --git a/pkg/descheduler/strategies/node_affinity.go b/pkg/descheduler/strategies/node_affinity.go
index cd21b8ce3..c768e6b78 100644
--- a/pkg/descheduler/strategies/node_affinity.go
+++ b/pkg/descheduler/strategies/node_affinity.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 
@@ -47,7 +48,7 @@ func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) err
 }
 
 // RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
-func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
 		return
@@ -58,10 +59,10 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 		return
 	}
 
-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
-		includedNamespaces = strategy.Params.Namespaces.Include
-		excludedNamespaces = strategy.Params.Namespaces.Exclude
+		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
 	nodeFit := false
@@ -71,6 +72,16 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 
 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
 
+	podFilter, err := podutil.NewOptions().
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		WithLabelSelector(strategy.Params.LabelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, nodeAffinity := range strategy.Params.NodeAffinityType {
 		klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
 
@@ -80,17 +91,13 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 				klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 
 				pods, err := podutil.ListPodsOnANode(
-					ctx,
-					client,
-					node,
-					podutil.WithFilter(func(pod *v1.Pod) bool {
+					node.Name,
+					getPodsAssignedToNode,
+					podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
 						return evictable.IsEvictable(pod) &&
 							!nodeutil.PodFitsCurrentNode(pod, node) &&
 							nodeutil.PodFitsAnyNode(pod, nodes)
 					}),
-					podutil.WithNamespaces(includedNamespaces),
-					podutil.WithoutNamespaces(excludedNamespaces),
-					podutil.WithLabelSelector(strategy.Params.LabelSelector),
 				)
 				if err != nil {
 					klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))
diff --git a/pkg/descheduler/strategies/node_taint.go b/pkg/descheduler/strategies/node_taint.go
index f2310b514..3a7c205dc 100644
--- a/pkg/descheduler/strategies/node_taint.go
+++ b/pkg/descheduler/strategies/node_taint.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 
+	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -48,18 +49,18 @@ func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters)
 }
 
 // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
-func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
 		return
 	}
 
-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	var labelSelector *metav1.LabelSelector
 	if strategy.Params != nil {
 		if strategy.Params.Namespaces != nil {
-			includedNamespaces = strategy.Params.Namespaces.Include
-			excludedNamespaces = strategy.Params.Namespaces.Exclude
+			includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+			excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 		}
 		labelSelector = strategy.Params.LabelSelector
 	}
@@ -77,17 +78,20 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 
 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
 
+	podFilter, err := podutil.NewOptions().
+		WithFilter(evictable.IsEvictable).
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		WithLabelSelector(labelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		pods, err := podutil.ListPodsOnANode(
-			ctx,
-			client,
-			node,
-			podutil.WithFilter(evictable.IsEvictable),
-			podutil.WithNamespaces(includedNamespaces),
-			podutil.WithoutNamespaces(excludedNamespaces),
-			podutil.WithLabelSelector(labelSelector),
-		)
+		pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
 		if err != nil {
 			//no pods evicted as error encountered retrieving evictable Pods
 			return
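
Two listing helpers appear in this patch: most strategies call podutil.ListPodsOnANode, while RemoveFailedPods and RemovePodsViolatingNodeTaints call podutil.ListAllPodsOnANode. Their bodies are not part of the diff; a plausible reading, consistent with the call sites, is that the former drops pods already in a terminal phase (RemoveFailedPods, by contrast, exists precisely to find Failed pods). A sketch under that assumption, reusing the types from the earlier sketches:

// ListAllPodsOnANode returns every pod the index holds for the node.
func ListAllPodsOnANode(nodeName string, getPods GetPodsAssignedToNodeFunc, filter FilterFunc) ([]*v1.Pod, error) {
	return getPods(nodeName, filter)
}

// ListPodsOnANode additionally drops pods that reached a terminal phase.
func ListPodsOnANode(nodeName string, getPods GetPodsAssignedToNodeFunc, filter FilterFunc) ([]*v1.Pod, error) {
	nonTerminal := func(pod *v1.Pod) bool {
		return pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed
	}
	return getPods(nodeName, WrapFilterFuncs(nonTerminal, filter))
}

If that reading is right, a nil filter — as getNodeUsage passes further below — simply means "every non-terminal pod on the node".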
diff --git a/pkg/descheduler/strategies/nodeutilization/highnodeutilization.go b/pkg/descheduler/strategies/nodeutilization/highnodeutilization.go
index 467889963..4a340b8fb 100644
--- a/pkg/descheduler/strategies/nodeutilization/highnodeutilization.go
+++ b/pkg/descheduler/strategies/nodeutilization/highnodeutilization.go
@@ -24,15 +24,17 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
+
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 // HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its strategy.
 // Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
-func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
 		return
@@ -61,7 +63,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
 	resourceNames := getResourceNames(targetThresholds)
 
 	sourceNodes, highNodes := classifyNodes(
-		getNodeUsage(ctx, client, nodes, thresholds, targetThresholds, resourceNames),
+		getNodeUsage(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode),
 		func(node *v1.Node, usage NodeUsage) bool {
 			return isNodeWithLowUtilization(usage)
 		},
diff --git a/pkg/descheduler/strategies/nodeutilization/lownodeutilization.go b/pkg/descheduler/strategies/nodeutilization/lownodeutilization.go
index 70bfe6257..725d7910a 100644
--- a/pkg/descheduler/strategies/nodeutilization/lownodeutilization.go
+++ b/pkg/descheduler/strategies/nodeutilization/lownodeutilization.go
@@ -28,12 +28,13 @@ import (
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 // LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
 // to calculate nodes' utilization and not the actual resource usage.
-func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
 	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
@@ -72,7 +73,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 	resourceNames := getResourceNames(thresholds)
 
 	lowNodes, sourceNodes := classifyNodes(
-		getNodeUsage(ctx, client, nodes, thresholds, targetThresholds, resourceNames),
+		getNodeUsage(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode),
 		// The node has to be schedulable (to be able to move workload there)
 		func(node *v1.Node, usage NodeUsage) bool {
 			if nodeutil.IsNodeUnschedulable(node) {
diff --git a/pkg/descheduler/strategies/nodeutilization/nodeutilization.go b/pkg/descheduler/strategies/nodeutilization/nodeutilization.go
index fca2330b7..d53d24a13 100644
--- a/pkg/descheduler/strategies/nodeutilization/nodeutilization.go
+++ b/pkg/descheduler/strategies/nodeutilization/nodeutilization.go
@@ -19,15 +19,16 @@ package nodeutilization
 import (
 	"context"
 	"fmt"
+	"sort"
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
+
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
-	"sort"
 )
 
 // NodeUsage stores a node's info, pods on it, thresholds and its resource usage
@@ -77,16 +78,15 @@ func validateThresholds(thresholds api.ResourceThresholds) error {
 }
 
 func getNodeUsage(
-	ctx context.Context,
-	client clientset.Interface,
 	nodes []*v1.Node,
 	lowThreshold, highThreshold api.ResourceThresholds,
 	resourceNames []v1.ResourceName,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) []NodeUsage {
 	var nodeUsageList []NodeUsage
 
 	for _, node := range nodes {
-		pods, err := podutil.ListPodsOnANode(ctx, client, node)
+		pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, nil)
 		if err != nil {
 			klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
 			continue
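
getNodeUsage now takes the lookup function instead of a client and lists each node's pods from the cache with a nil filter. The utilization numbers themselves are still derived from resource requests, as the strategy comments say. A simplified illustration of that kind of accounting — this is not getNodeUsage's actual body, and it ignores init containers and pod overhead (imports: v1 "k8s.io/api/core/v1" and "k8s.io/apimachinery/pkg/api/resource"):

// totalRequests sums the CPU and memory requests of the listed pods.
func totalRequests(pods []*v1.Pod) v1.ResourceList {
	total := v1.ResourceList{
		v1.ResourceCPU:    resource.Quantity{},
		v1.ResourceMemory: resource.Quantity{},
	}
	for _, pod := range pods {
		for _, container := range pod.Spec.Containers {
			for _, name := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} {
				if req, ok := container.Resources.Requests[name]; ok {
					sum := total[name]
					sum.Add(req)
					total[name] = sum
				}
			}
		}
	}
	return total
}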
diff --git a/pkg/descheduler/strategies/pod_antiaffinity.go b/pkg/descheduler/strategies/pod_antiaffinity.go
index 6fe9d5a29..a5c71cd74 100644
--- a/pkg/descheduler/strategies/pod_antiaffinity.go
+++ b/pkg/descheduler/strategies/pod_antiaffinity.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 
+	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -48,18 +49,18 @@ func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyP
 }
 
 // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
-func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters")
 		return
 	}
 
-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	var labelSelector *metav1.LabelSelector
 	if strategy.Params != nil {
 		if strategy.Params.Namespaces != nil {
-			includedNamespaces = strategy.Params.Namespaces.Include
-			excludedNamespaces = strategy.Params.Namespaces.Exclude
+			includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+			excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 		}
 		labelSelector = strategy.Params.LabelSelector
 	}
@@ -77,16 +78,19 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 
 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
 
+	podFilter, err := podutil.NewOptions().
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		WithLabelSelector(labelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		pods, err := podutil.ListPodsOnANode(
-			ctx,
-			client,
-			node,
-			podutil.WithNamespaces(includedNamespaces),
-			podutil.WithoutNamespaces(excludedNamespaces),
-			podutil.WithLabelSelector(labelSelector),
-		)
+		pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
 		if err != nil {
 			return
 		}
diff --git a/pkg/descheduler/strategies/pod_lifetime.go b/pkg/descheduler/strategies/pod_lifetime.go
index 3720ea5e4..bf31892da 100644
--- a/pkg/descheduler/strategies/pod_lifetime.go
+++ b/pkg/descheduler/strategies/pod_lifetime.go
@@ -22,6 +22,7 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 
@@ -56,7 +57,7 @@ func validatePodLifeTimeParams(params *api.StrategyParameters) error {
 }
 
 // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
-func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validatePodLifeTimeParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid PodLifeTime parameters")
 		return
@@ -68,10 +69,10 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 		return
 	}
 
-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
-		includedNamespaces = strategy.Params.Namespaces.Include
-		excludedNamespaces = strategy.Params.Namespaces.Exclude
+		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
@@ -88,10 +89,21 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 		}
 	}
 
+	podFilter, err := podutil.NewOptions().
+		WithFilter(filter).
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		WithLabelSelector(strategy.Params.LabelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 
-		pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, strategy.Params.LabelSelector, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter)
+		pods := listOldPodsOnNode(node.Name, getPodsAssignedToNode, podFilter, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
 		for _, pod := range pods {
 			success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
 			if success {
@@ -108,23 +120,12 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 }
 
 func listOldPodsOnNode(
-	ctx context.Context,
-	client clientset.Interface,
-	node *v1.Node,
-	includedNamespaces, excludedNamespaces []string,
-	labelSelector *metav1.LabelSelector,
+	nodeName string,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
+	filter podutil.FilterFunc,
 	maxPodLifeTimeSeconds uint,
-	filter func(pod *v1.Pod) bool,
 ) []*v1.Pod {
-	pods, err := podutil.ListPodsOnANode(
-		ctx,
-		client,
-		node,
-		podutil.WithFilter(filter),
-		podutil.WithNamespaces(includedNamespaces),
-		podutil.WithoutNamespaces(excludedNamespaces),
-		podutil.WithLabelSelector(labelSelector),
-	)
+	pods, err := podutil.ListPodsOnANode(nodeName, getPodsAssignedToNode, filter)
 	if err != nil {
 		return nil
 	}
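
listOldPodsOnNode shrinks to (nodeName, lookup, compiled filter, max lifetime); the age test itself is not shown in this hunk, but it remains a plain predicate over the pod's creation timestamp, roughly as follows (illustrative, using the FilterFunc type sketched earlier plus the "time" package):

// olderThan keeps only pods whose age exceeds maxSeconds.
func olderThan(maxSeconds uint) FilterFunc {
	return func(pod *v1.Pod) bool {
		return time.Since(pod.CreationTimestamp.Time) > time.Duration(maxSeconds)*time.Second
	}
}

A predicate like this could equally be composed into podFilter via WrapFilterFuncs; the patch keeps the age check inside listOldPodsOnNode instead, which preserves the existing behavior while only changing how the pods are fetched.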
diff --git a/pkg/descheduler/strategies/toomanyrestarts.go b/pkg/descheduler/strategies/toomanyrestarts.go
index 11dddc402..629122f07 100644
--- a/pkg/descheduler/strategies/toomanyrestarts.go
+++ b/pkg/descheduler/strategies/toomanyrestarts.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 
@@ -49,7 +50,7 @@ func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameter
 // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
 // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
 		return
@@ -61,10 +62,10 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 		return
 	}
 
-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
-		includedNamespaces = strategy.Params.Namespaces.Include
-		excludedNamespaces = strategy.Params.Namespaces.Exclude
+		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
 	nodeFit := false
@@ -74,17 +75,20 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 
 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
 
+	podFilter, err := podutil.NewOptions().
+		WithFilter(evictable.IsEvictable).
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		WithLabelSelector(strategy.Params.LabelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		pods, err := podutil.ListPodsOnANode(
-			ctx,
-			client,
-			node,
-			podutil.WithFilter(evictable.IsEvictable),
-			podutil.WithNamespaces(includedNamespaces),
-			podutil.WithoutNamespaces(excludedNamespaces),
-			podutil.WithLabelSelector(strategy.Params.LabelSelector),
-		)
+		pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
 		if err != nil {
 			klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))
 			continue
diff --git a/pkg/descheduler/strategies/topologyspreadconstraint.go b/pkg/descheduler/strategies/topologyspreadconstraint.go
index 6b56a52b0..2f0fda87a 100644
--- a/pkg/descheduler/strategies/topologyspreadconstraint.go
+++ b/pkg/descheduler/strategies/topologyspreadconstraint.go
@@ -22,17 +22,17 @@ import (
 	"math"
 	"sort"
 
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
@@ -54,6 +54,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
 	if err != nil {