mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Pass the strategy name into evictor through context

Author:  Jan Chaloupka
Date:    2022-06-09 11:50:09 +02:00
Parent:  b2418ef481
Commit:  d5ee855221

11 changed files with 20 additions and 15 deletions
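
In short: EvictPod no longer takes the strategy name and eviction reason as explicit arguments; callers attach them to the context under the "strategyName" and "evictionReason" keys, and EvictPod reads them back with type assertions. A minimal, self-contained Go sketch of that round trip (the two keys mirror the diff below; fromContext and the surrounding program are illustrative, not repo code):

package main

import (
	"context"
	"fmt"
)

// fromContext recovers an optional string stored under key, the same
// lookup the patched EvictPod performs for "strategyName" and
// "evictionReason". It returns "" when the key is absent.
func fromContext(ctx context.Context, key string) string {
	if v := ctx.Value(key); v != nil {
		return v.(string) // panics if a non-string was stored under key
	}
	return ""
}

func main() {
	// The descheduler loop wraps the context once per strategy; a
	// strategy could attach a per-eviction reason the same way.
	ctx := context.WithValue(context.Background(), "strategyName", "PodLifeTime")
	ctx = context.WithValue(ctx, "evictionReason", "pod exceeded its lifetime")

	fmt.Println(fromContext(ctx, "strategyName"))   // PodLifeTime
	fmt.Println(fromContext(ctx, "evictionReason")) // pod exceeded its lifetime
}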


@@ -302,7 +302,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 					continue
 				}
 				evictorFilter := evictions.NewEvictorFilter(nodes, getPodsAssignedToNode, evictLocalStoragePods, evictSystemCriticalPods, ignorePvcPods, evictBarePods, evictions.WithNodeFit(nodeFit), evictions.WithPriorityThreshold(thresholdPriority))
-				f(ctx, rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
+				f(context.WithValue(ctx, "strategyName", string(name)), rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			}
 		} else {
 			klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
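
One caveat on the added line above: the key is a bare string literal, so any other package storing a string-keyed "strategyName" value would collide with it. The context package's documentation recommends an unexported key type for exactly this reason; a hedged sketch of that alternative (not what this commit does; withStrategyName and strategyName are hypothetical helpers):

package descheduler

import "context"

// ctxKey is unexported, so values stored with it cannot collide with
// string keys used by other packages.
type ctxKey string

const strategyNameKey ctxKey = "strategyName"

// withStrategyName is a hypothetical helper; the commit calls
// context.WithValue with a bare string key instead.
func withStrategyName(ctx context.Context, name string) context.Context {
	return context.WithValue(ctx, strategyNameKey, name)
}

// strategyName recovers the name, or "" if it was never attached.
func strategyName(ctx context.Context) string {
	name, _ := ctx.Value(strategyNameKey).(string)
	return name
}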


@@ -19,7 +19,6 @@ package evictions
 import (
 	"context"
 	"fmt"
-	"strings"

 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1beta1"
@@ -106,11 +105,17 @@ func (pe *PodEvictor) TotalEvicted() uint {
 // EvictPod returns non-nil error only when evicting a pod on a node is not
 // possible (due to maxPodsToEvictPerNode constraint). Success is true when the pod
 // is evicted on the server side.
-func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, strategy string, reasons ...string) (bool, error) {
-	reason := strategy
-	if len(reasons) > 0 {
-		reason += " (" + strings.Join(reasons, ", ") + ")"
+// eviction reason can be set through the ctx's evictionReason:STRING pair
+func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node) (bool, error) {
+	strategy := ""
+	if ctx.Value("strategyName") != nil {
+		strategy = ctx.Value("strategyName").(string)
+	}
+	reason := ""
+	if ctx.Value("evictionReason") != nil {
+		reason = ctx.Value("evictionReason").(string)
 	}
 	if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[node]+1 > *pe.maxPodsToEvictPerNode {
 		if pe.metricsEnabled {
 			metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
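
The lookups above use unchecked type assertions after a nil test, so storing a non-string under either key would panic. A comma-ok variant sidesteps that; a small sketch under the same string-key assumption (strategyFromContext is a hypothetical alternative, not the committed code):

package evictions

import "context"

// strategyFromContext behaves like the lookup in the patched EvictPod,
// except that it also tolerates a non-string value stored under the
// key, returning "" instead of panicking.
func strategyFromContext(ctx context.Context) string {
	if v, ok := ctx.Value("strategyName").(string); ok {
		return v
	}
	return ""
}

Under the new signature, a strategy that previously passed extra reasons strings would presumably wrap its context first, e.g. context.WithValue(ctx, "evictionReason", "restart count over threshold"), just before calling EvictPod.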


@@ -199,7 +199,7 @@ func RemoveDuplicatePods(
 			// It's assumed all duplicated pods are in the same priority class
 			// TODO(jchaloup): check if the pod has a different node to lend to
 			for _, pod := range pods[upperAvg-1:] {
-				if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[nodeName], "RemoveDuplicatePods"); err != nil {
+				if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[nodeName]); err != nil {
 					klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 					break
 				}


@@ -75,7 +75,7 @@ func RemoveFailedPods(
 			continue
 		}
-		if _, err = podEvictor.EvictPod(ctx, pods[i], node, "FailedPod"); err != nil {
+		if _, err = podEvictor.EvictPod(ctx, pods[i], node); err != nil {
 			klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 			break
 		}


@@ -93,7 +93,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface
 			for _, pod := range pods {
 				if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
 					klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
-					if _, err := podEvictor.EvictPod(ctx, pod, node, "NodeAffinity"); err != nil {
+					if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
 						klog.ErrorS(err, "Error evicting pod")
 						break
 					}


@@ -106,7 +106,7 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface
 				taintFilterFnc,
 			) {
 				klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
-				if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
+				if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
 					klog.ErrorS(err, "Error evicting pod")
 					break
 				}


@@ -313,7 +313,7 @@ func evictPods(
 			continue
 		}
-		success, err := podEvictor.EvictPod(ctx, pod, nodeInfo.node, strategy)
+		success, err := podEvictor.EvictPod(ctx, pod, nodeInfo.node)
 		if err != nil {
 			klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 			break


@@ -86,7 +86,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
 			if checkPodsWithAntiAffinityExist(pods[i], pods) && evictorFilter.Filter(pods[i]) {
-				success, err := podEvictor.EvictPod(ctx, pods[i], node, "InterPodAntiAffinity")
+				success, err := podEvictor.EvictPod(ctx, pods[i], node)
 				if err != nil {
 					klog.ErrorS(err, "Error evicting pod")
 					break


@@ -107,7 +107,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy
 	podutil.SortPodsBasedOnAge(podsToEvict)
 	for _, pod := range podsToEvict {
-		success, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodLifeTime")
+		success, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName])
 		if success {
 			klog.V(1).InfoS("Evicted pod because it exceeded its lifetime", "pod", klog.KObj(pod), "maxPodLifeTime", *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
 		}


@@ -89,7 +89,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface
 		} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
 			continue
 		}
-		if _, err := podEvictor.EvictPod(ctx, pods[i], node, "TooManyRestarts"); err != nil {
+		if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
 			klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 			break
 		}


@@ -187,7 +187,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
 			if !isEvictable(pod) {
 				continue
 			}
-			if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodTopologySpread"); err != nil {
+			if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName]); err != nil {
 				klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 				break
 			}