Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-25 20:59:28 +01:00)

reform all strategies by using getPodsAssignedToNode

Signed-off-by: Garrybest <garrybest@foxmail.com>
Garrybest committed 2021-12-10 19:28:51 +08:00
parent 08ed129a07, commit 0ff8ecb41e
12 changed files with 145 additions and 105 deletions
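
The core of the change: instead of each strategy issuing a pods List against the API server for every node on every descheduling run, a single lookup function is built once from the shared pod informer and threaded through every strategy. Below is a minimal sketch of how such a function can be built — an index on the pod informer keyed by spec.nodeName — matching the shape of podutil.BuildGetPodsAssignedToNodeFunc used in the diffs; the index name and the exact informer type taken here are assumptions, not the repository's verbatim code.

package podutil

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// FilterFunc and GetPodsAssignedToNodeFunc mirror the types referenced in the
// diffs below; the wiring here is illustrative.
type FilterFunc func(*v1.Pod) bool
type GetPodsAssignedToNodeFunc func(nodeName string, filter FilterFunc) ([]*v1.Pod, error)

const nodeNameIndex = "nodeName" // assumed index key

func BuildGetPodsAssignedToNodeFunc(podInformer cache.SharedIndexInformer) (GetPodsAssignedToNodeFunc, error) {
	// Index pods by spec.nodeName so each lookup walks only that node's pods
	// in the informer cache instead of issuing a List to the API server.
	if err := podInformer.AddIndexers(cache.Indexers{
		nodeNameIndex: func(obj interface{}) ([]string, error) {
			pod, ok := obj.(*v1.Pod)
			if !ok {
				return nil, fmt.Errorf("expected *v1.Pod, got %T", obj)
			}
			return []string{pod.Spec.NodeName}, nil
		},
	}); err != nil {
		return nil, err
	}
	return func(nodeName string, filter FilterFunc) ([]*v1.Pod, error) {
		objs, err := podInformer.GetIndexer().ByIndex(nodeNameIndex, nodeName)
		if err != nil {
			return nil, err
		}
		pods := make([]*v1.Pod, 0, len(objs))
		for _, obj := range objs {
			if pod, ok := obj.(*v1.Pod); ok && (filter == nil || filter(pod)) {
				pods = append(pods, pod)
			}
		}
		return pods, nil
	}, nil
}

With the wiring in the first file below, the function is built from the pod informer before the factory starts, so the index is registered ahead of cache sync and every later lookup is a cheap indexed read.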

View File

@@ -33,6 +33,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
 )
@@ -64,11 +65,17 @@ func Run(rs *options.DeschedulerServer) error {
 	return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
 }

-type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor)
+type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)

 func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
 	sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
 	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
+	podInformer := sharedInformerFactory.Core().V1().Pods()
+
+	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+	if err != nil {
+		return fmt.Errorf("build get pods assigned to node function error: %v", err)
+	}

 	sharedInformerFactory.Start(stopChannel)
 	sharedInformerFactory.WaitForCacheSync(stopChannel)
@@ -138,7 +145,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 	for name, strategy := range deschedulerPolicy.Strategies {
 		if f, ok := strategyFuncs[name]; ok {
 			if strategy.Enabled {
-				f(ctx, rs.Client, strategy, nodes, podEvictor)
+				f(ctx, rs.Client, strategy, nodes, podEvictor, getPodsAssignedToNode)
 			}
 		} else {
 			klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)

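An ordering detail the hunk above gets right: the index must be registered before the informer factory starts, which is why BuildGetPodsAssignedToNodeFunc is called between creating the pod informer and sharedInformerFactory.Start. A standalone sketch of equivalent wiring (the kubeconfig source, node name, and the Build helper from the earlier sketch are illustrative):

package main

import (
	"os"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 0)
	podInformer := factory.Core().V1().Pods().Informer()

	// Register the node-name index before Start; client-go rejects new
	// indexers once the informer is already running.
	getPods, err := BuildGetPodsAssignedToNodeFunc(podInformer)
	if err != nil {
		panic(err)
	}

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	pods, err := getPods("node-1", nil) // all cached pods bound to "node-1"
	if err != nil {
		panic(err)
	}
	_ = pods
}
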
View File

@@ -66,6 +66,7 @@ func RemoveDuplicatePods(
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
@@ -77,10 +78,10 @@ func RemoveDuplicatePods(
 		return
 	}

-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params != nil && strategy.Params.Namespaces != nil {
-		includedNamespaces = strategy.Params.Namespaces.Include
-		excludedNamespaces = strategy.Params.Namespaces.Exclude
+		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}

 	nodeFit := false
@@ -95,15 +96,19 @@ func RemoveDuplicatePods(
 	nodeCount := 0
 	nodeMap := make(map[string]*v1.Node)

+	podFilter, err := podutil.NewOptions().
+		WithFilter(evictable.IsEvictable).
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		pods, err := podutil.ListPodsOnANode(ctx,
-			client,
-			node,
-			podutil.WithFilter(evictable.IsEvictable),
-			podutil.WithNamespaces(includedNamespaces),
-			podutil.WithoutNamespaces(excludedNamespaces),
-		)
+		pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
 		if err != nil {
 			klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
 			continue

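This hunk establishes the pattern every remaining strategy follows: compile the namespace/label/evictability criteria once into a single podFilter, then pass it to each per-node cache lookup instead of re-specifying list options per call. A sketch of what such a builder can look like — an assumed shape, not the repository's exact podutil code:

package podutil

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
)

type FilterFunc func(*v1.Pod) bool

// Options collects filter criteria once; BuildFilterFunc compiles them into a
// single predicate evaluated against the informer cache.
type Options struct {
	filter             FilterFunc
	includedNamespaces sets.String
	excludedNamespaces sets.String
	labelSelector      *metav1.LabelSelector
}

func NewOptions() *Options { return &Options{} }

func (o *Options) WithFilter(f FilterFunc) *Options           { o.filter = f; return o }
func (o *Options) WithNamespaces(ns sets.String) *Options     { o.includedNamespaces = ns; return o }
func (o *Options) WithoutNamespaces(ns sets.String) *Options  { o.excludedNamespaces = ns; return o }
func (o *Options) WithLabelSelector(s *metav1.LabelSelector) *Options { o.labelSelector = s; return o }

func (o *Options) BuildFilterFunc() (FilterFunc, error) {
	var sel labels.Selector
	if o.labelSelector != nil {
		s, err := metav1.LabelSelectorAsSelector(o.labelSelector)
		if err != nil {
			return nil, err // selector compilation is the only fallible step
		}
		sel = s
	}
	return func(pod *v1.Pod) bool {
		if len(o.includedNamespaces) > 0 && !o.includedNamespaces.Has(pod.Namespace) {
			return false
		}
		if o.excludedNamespaces.Has(pod.Namespace) {
			return false
		}
		if sel != nil && !sel.Matches(labels.Set(pod.Labels)) {
			return false
		}
		return o.filter == nil || o.filter(pod)
	}, nil
}

Switching includedNamespaces and excludedNamespaces from []string to sets.String makes the per-pod membership test constant-time, which matters now that the filter runs over every cached pod on every node.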
View File

@@ -3,11 +3,11 @@ package strategies
 import (
 	"context"
 	"fmt"

-	"k8s.io/apimachinery/pkg/util/sets"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
@@ -33,6 +33,7 @@ func RemoveFailedPods(
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	strategyParams, err := validateAndParseRemoveFailedPodsParams(ctx, client, strategy.Params)
 	if err != nil {
@@ -51,20 +52,23 @@ func RemoveFailedPods(
 		labelSelector = strategy.Params.LabelSelector
 	}

+	podFilter, err := podutil.NewOptions().
+		WithFilter(evictable.IsEvictable).
+		WithNamespaces(strategyParams.IncludedNamespaces).
+		WithoutNamespaces(strategyParams.ExcludedNamespaces).
+		WithLabelSelector(labelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+	// Only list failed pods
+	phaseFilter := func(pod *v1.Pod) bool { return pod.Status.Phase == v1.PodFailed }
+	podFilter = podutil.WrapFilterFuncs(phaseFilter, podFilter)
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase=" + string(v1.PodFailed)
-		pods, err := podutil.ListPodsOnANodeWithFieldSelector(
-			ctx,
-			client,
-			node,
-			fieldSelectorString,
-			podutil.WithFilter(evictable.IsEvictable),
-			podutil.WithNamespaces(strategyParams.IncludedNamespaces.UnsortedList()),
-			podutil.WithoutNamespaces(strategyParams.ExcludedNamespaces.UnsortedList()),
-			podutil.WithLabelSelector(labelSelector),
-		)
+		pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
 		if err != nil {
 			klog.ErrorS(err, "Error listing a nodes failed pods", "node", klog.KObj(node))
 			continue

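Note what replaced the old field selector: previously the API server filtered by spec.nodeName and status.phase=Failed; now the phase check is an ordinary client-side filter ANDed in with WrapFilterFuncs, and the listing uses ListAllPodsOnANode so terminated pods are not screened out before the phase filter sees them. WrapFilterFuncs plausibly reduces to a few lines (assumed sketch, not the repository's verbatim code):

package podutil

import v1 "k8s.io/api/core/v1"

type FilterFunc func(*v1.Pod) bool

// AND-compose filters: a pod passes only if every non-nil filter accepts it.
func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
	return func(pod *v1.Pod) bool {
		for _, f := range filters {
			if f != nil && !f(pod) {
				return false
			}
		}
		return true
	}
}
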
View File

@@ -21,6 +21,7 @@ import (
 	"fmt"

 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
@@ -47,7 +48,7 @@ func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) err
 }

 // RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
-func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
 		return
@@ -58,10 +59,10 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 		return
 	}

-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
-		includedNamespaces = strategy.Params.Namespaces.Include
-		excludedNamespaces = strategy.Params.Namespaces.Exclude
+		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}

 	nodeFit := false
@@ -71,6 +72,16 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

+	podFilter, err := podutil.NewOptions().
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		WithLabelSelector(strategy.Params.LabelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, nodeAffinity := range strategy.Params.NodeAffinityType {
 		klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
@@ -80,17 +91,13 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 			klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 			pods, err := podutil.ListPodsOnANode(
-				ctx,
-				client,
-				node,
-				podutil.WithFilter(func(pod *v1.Pod) bool {
+				node.Name,
+				getPodsAssignedToNode,
+				podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
 					return evictable.IsEvictable(pod) &&
 						!nodeutil.PodFitsCurrentNode(pod, node) &&
 						nodeutil.PodFitsAnyNode(pod, nodes)
 				}),
-				podutil.WithNamespaces(includedNamespaces),
-				podutil.WithoutNamespaces(excludedNamespaces),
-				podutil.WithLabelSelector(strategy.Params.LabelSelector),
 			)
 			if err != nil {
 				klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))

View File

@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"

+	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -48,18 +49,18 @@ func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters)
 }

 // RemovePodsViolatingNodeTaints evicts pods on nodes which violate NoSchedule taints
-func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
 		return
 	}

-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	var labelSelector *metav1.LabelSelector
 	if strategy.Params != nil {
 		if strategy.Params.Namespaces != nil {
-			includedNamespaces = strategy.Params.Namespaces.Include
-			excludedNamespaces = strategy.Params.Namespaces.Exclude
+			includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+			excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 		}
 		labelSelector = strategy.Params.LabelSelector
 	}
@@ -77,17 +78,20 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

+	podFilter, err := podutil.NewOptions().
+		WithFilter(evictable.IsEvictable).
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		WithLabelSelector(labelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		pods, err := podutil.ListPodsOnANode(
-			ctx,
-			client,
-			node,
-			podutil.WithFilter(evictable.IsEvictable),
-			podutil.WithNamespaces(includedNamespaces),
-			podutil.WithoutNamespaces(excludedNamespaces),
-			podutil.WithLabelSelector(labelSelector),
-		)
+		pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
 		if err != nil {
 			// no pods evicted as error encountered retrieving evictable Pods
 			return

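Two listing helpers now appear across the strategies: RemoveFailedPods and RemovePodsViolatingNodeTaints call ListAllPodsOnANode, while the others call ListPodsOnANode. A plausible reading of the split — an assumption from the call sites, not confirmed by this diff — is that the latter additionally skips terminated pods. A sketch under that assumption, reusing the types and WrapFilterFuncs from the sketches above:

// ListAllPodsOnANode: every cached pod bound to the node that passes filter.
func ListAllPodsOnANode(nodeName string, g GetPodsAssignedToNodeFunc, filter FilterFunc) ([]*v1.Pod, error) {
	return g(nodeName, filter)
}

// ListPodsOnANode: the same, but terminated (Succeeded/Failed) pods are skipped
// before the caller's filter runs.
func ListPodsOnANode(nodeName string, g GetPodsAssignedToNodeFunc, filter FilterFunc) ([]*v1.Pod, error) {
	return g(nodeName, WrapFilterFuncs(func(pod *v1.Pod) bool {
		return pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed
	}, filter))
}
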
View File

@@ -24,15 +24,17 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"

 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )

 // HighNodeUtilization evicts pods from underutilized nodes so that the scheduler can schedule according to its strategy.
 // Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
-func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
 		return
@@ -61,7 +63,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
 	resourceNames := getResourceNames(targetThresholds)

 	sourceNodes, highNodes := classifyNodes(
-		getNodeUsage(ctx, client, nodes, thresholds, targetThresholds, resourceNames),
+		getNodeUsage(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode),
 		func(node *v1.Node, usage NodeUsage) bool {
 			return isNodeWithLowUtilization(usage)
 		},

View File

@@ -28,12 +28,13 @@ import (
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )

 // LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
 // to calculate nodes' utilization and not the actual resource usage.
-func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	// TODO: Maybe create a struct for the strategy as well, so that we don't have to pass along all the params?
 	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
@@ -72,7 +73,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 	resourceNames := getResourceNames(thresholds)

 	lowNodes, sourceNodes := classifyNodes(
-		getNodeUsage(ctx, client, nodes, thresholds, targetThresholds, resourceNames),
+		getNodeUsage(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode),
 		// The node has to be schedulable (to be able to move workload there)
 		func(node *v1.Node, usage NodeUsage) bool {
 			if nodeutil.IsNodeUnschedulable(node) {

View File

@@ -19,15 +19,16 @@ package nodeutilization
 import (
 	"context"
 	"fmt"
+	"sort"

 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"

 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
-	"sort"
 )

 // NodeUsage stores a node's info, pods on it, thresholds and its resource usage
@@ -77,16 +78,15 @@ func validateThresholds(thresholds api.ResourceThresholds) error {
 }

 func getNodeUsage(
-	ctx context.Context,
-	client clientset.Interface,
 	nodes []*v1.Node,
 	lowThreshold, highThreshold api.ResourceThresholds,
 	resourceNames []v1.ResourceName,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) []NodeUsage {
 	var nodeUsageList []NodeUsage

 	for _, node := range nodes {
-		pods, err := podutil.ListPodsOnANode(ctx, client, node)
+		pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, nil)
 		if err != nil {
 			klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
 			continue

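getNodeUsage now reads pods straight from the node-indexed cache; the nil filter means utilization counts every pod on the node. As the utilization strategies' doc comments stress, usage is computed from requests rather than live metrics. A simplified version of that summation, ignoring init containers and pod overhead for brevity:

package nodeutilization

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Sum container CPU/memory requests across a node's pods; utilization is then
// this total relative to the node's allocatable capacity.
func totalRequests(pods []*v1.Pod) v1.ResourceList {
	cpu := resource.NewMilliQuantity(0, resource.DecimalSI)
	mem := resource.NewQuantity(0, resource.BinarySI)
	for _, pod := range pods {
		for _, c := range pod.Spec.Containers {
			if q, ok := c.Resources.Requests[v1.ResourceCPU]; ok {
				cpu.Add(q)
			}
			if q, ok := c.Resources.Requests[v1.ResourceMemory]; ok {
				mem.Add(q)
			}
		}
	}
	return v1.ResourceList{v1.ResourceCPU: *cpu, v1.ResourceMemory: *mem}
}
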
View File

@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"

+	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -48,18 +49,18 @@ func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyP
 }

 // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which violate inter-pod anti-affinity rules.
-func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters")
 		return
 	}

-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	var labelSelector *metav1.LabelSelector
 	if strategy.Params != nil {
 		if strategy.Params.Namespaces != nil {
-			includedNamespaces = strategy.Params.Namespaces.Include
-			excludedNamespaces = strategy.Params.Namespaces.Exclude
+			includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+			excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 		}
 		labelSelector = strategy.Params.LabelSelector
 	}
@@ -77,16 +78,19 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

+	podFilter, err := podutil.NewOptions().
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		WithLabelSelector(labelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		pods, err := podutil.ListPodsOnANode(
-			ctx,
-			client,
-			node,
-			podutil.WithNamespaces(includedNamespaces),
-			podutil.WithoutNamespaces(excludedNamespaces),
-			podutil.WithLabelSelector(labelSelector),
-		)
+		pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
 		if err != nil {
 			return
 		}

View File

@@ -22,6 +22,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
@@ -56,7 +57,7 @@ func validatePodLifeTimeParams(params *api.StrategyParameters) error {
 }

 // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
-func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validatePodLifeTimeParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid PodLifeTime parameters")
 		return
@@ -68,10 +69,10 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 		return
 	}

-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
-		includedNamespaces = strategy.Params.Namespaces.Include
-		excludedNamespaces = strategy.Params.Namespaces.Exclude
+		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}

 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
@@ -88,10 +89,21 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 		}
 	}

+	podFilter, err := podutil.NewOptions().
+		WithFilter(filter).
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		WithLabelSelector(strategy.Params.LabelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, strategy.Params.LabelSelector, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter)
+		pods := listOldPodsOnNode(node.Name, getPodsAssignedToNode, podFilter, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
 		for _, pod := range pods {
 			success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
 			if success {
@@ -108,23 +120,12 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 }

 func listOldPodsOnNode(
-	ctx context.Context,
-	client clientset.Interface,
-	node *v1.Node,
-	includedNamespaces, excludedNamespaces []string,
-	labelSelector *metav1.LabelSelector,
+	nodeName string,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
+	filter podutil.FilterFunc,
 	maxPodLifeTimeSeconds uint,
-	filter func(pod *v1.Pod) bool,
 ) []*v1.Pod {
-	pods, err := podutil.ListPodsOnANode(
-		ctx,
-		client,
-		node,
-		podutil.WithFilter(filter),
-		podutil.WithNamespaces(includedNamespaces),
-		podutil.WithoutNamespaces(excludedNamespaces),
-		podutil.WithLabelSelector(labelSelector),
-	)
+	pods, err := podutil.ListPodsOnANode(nodeName, getPodsAssignedToNode, filter)
 	if err != nil {
 		return nil
 	}

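PodLifeTime folds its age test into podFilter before the node loop, so listOldPodsOnNode shrinks to a single cache lookup plus the age cut. The age test itself reduces to a timestamp comparison; an illustrative version (the strategy's real filter can also screen pod status phases when the params request it):

package strategies

import (
	"time"

	v1 "k8s.io/api/core/v1"
)

// exceedsLifetime is an illustrative stand-in for the age check PodLifeTime
// composes into its filter: true once the pod is older than maxSeconds.
func exceedsLifetime(maxSeconds uint) func(*v1.Pod) bool {
	return func(pod *v1.Pod) bool {
		return time.Since(pod.CreationTimestamp.Time) > time.Duration(maxSeconds)*time.Second
	}
}
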
View File

@@ -21,6 +21,7 @@ import (
 	"fmt"

 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
@@ -49,7 +50,7 @@ func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameter
 // RemovePodsHavingTooManyRestarts evicts pods that have too many restarts on a node.
 // Many cases can lead to this issue: a volume mount failed, or an app error due to nodes' different settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods, or pods with local storage.
-func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
 		return
@@ -61,10 +62,10 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 		return
 	}

-	var includedNamespaces, excludedNamespaces []string
+	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
-		includedNamespaces = strategy.Params.Namespaces.Include
-		excludedNamespaces = strategy.Params.Namespaces.Exclude
+		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}

 	nodeFit := false
@@ -74,17 +75,20 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

+	podFilter, err := podutil.NewOptions().
+		WithFilter(evictable.IsEvictable).
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		WithLabelSelector(strategy.Params.LabelSelector).
+		BuildFilterFunc()
+	if err != nil {
+		klog.ErrorS(err, "Error initializing pod filter function")
+		return
+	}
+
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		pods, err := podutil.ListPodsOnANode(
-			ctx,
-			client,
-			node,
-			podutil.WithFilter(evictable.IsEvictable),
-			podutil.WithNamespaces(includedNamespaces),
-			podutil.WithoutNamespaces(excludedNamespaces),
-			podutil.WithLabelSelector(strategy.Params.LabelSelector),
-		)
+		pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
 		if err != nil {
 			klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))
 			continue

View File

@@ -22,17 +22,17 @@ import (
 	"math"
 	"sort"

-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"

 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
@@ -54,6 +54,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
 	if err != nil {