Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00
Add optional reason parameter to EvictPod
@@ -19,6 +19,7 @@ package evictions
 import (
 	"context"
 	"fmt"
+	"strings"

 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1beta1"
@@ -124,7 +125,11 @@ func (pe *PodEvictor) TotalEvicted() int {
 // EvictPod returns non-nil error only when evicting a pod on a node is not
 // possible (due to maxPodsToEvict constraint). Success is true when the pod
 // is evicted on the server side.
-func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node) (bool, error) {
+func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, reasons ...string) (bool, error) {
+	reason := ""
+	if len(reasons) > 0 {
+		reason = " (" + strings.Join(reasons, ", ") + ")"
+	}
 	if pe.maxPodsToEvict > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvict {
 		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvict, node.Name)
 	}
@@ -132,15 +137,15 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node)
 	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
 	if err != nil {
 		// err is used only for logging purposes
-		klog.Errorf("Error evicting pod: %#v in namespace %#v (%#v)", pod.Name, pod.Namespace, err)
+		klog.Errorf("Error evicting pod: %#v in namespace %#v%s: %#v", pod.Name, pod.Namespace, reason, err)
 		return false, nil
 	}

 	pe.nodepodCount[node]++
 	if pe.dryRun {
-		klog.V(1).Infof("Evicted pod in dry run mode: %#v in namespace %#v", pod.Name, pod.Namespace)
+		klog.V(1).Infof("Evicted pod in dry run mode: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
 	} else {
-		klog.V(1).Infof("Evicted pod: %#v in namespace %#v", pod.Name, pod.Namespace)
+		klog.V(1).Infof("Evicted pod: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
 	}
 	return true, nil
 }

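Note: making the new parameter variadic (reasons ...string) keeps the change backward compatible, since every existing call site compiles unchanged and zero reasons produce an empty suffix. Below is a minimal, self-contained sketch of the formatting behaviour; formatReason is a hypothetical helper named here purely for illustration and is not part of the patch.

package main

import (
	"fmt"
	"strings"
)

// formatReason mirrors the logic added to EvictPod above: zero reasons
// yield an empty suffix; otherwise the reasons are comma-joined and
// wrapped in parentheses.
func formatReason(reasons ...string) string {
	if len(reasons) == 0 {
		return ""
	}
	return " (" + strings.Join(reasons, ", ") + ")"
}

func main() {
	// Without a reason the log line reads exactly as before the patch.
	fmt.Printf("Evicted pod: %#v in namespace %#v%s\n", "web-0", "default", formatReason())
	// With a reason the strategy name is appended: " (PodLifeTime)".
	fmt.Printf("Evicted pod: %#v in namespace %#v%s\n", "web-0", "default", formatReason("PodLifeTime"))
}
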
@@ -101,7 +101,7 @@ func RemoveDuplicatePods(
 		}

 		for _, pod := range duplicatePods {
-			if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
+			if _, err := podEvictor.EvictPod(ctx, pod, node, "RemoveDuplicatePods"); err != nil {
 				klog.Errorf("Error evicting pod: (%#v)", err)
 				break
 			}

@@ -289,7 +289,7 @@ func evictPods(
 			cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
 			mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)

-			success, err := podEvictor.EvictPod(ctx, pod, node)
+			success, err := podEvictor.EvictPod(ctx, pod, node, "LowNodeUtilization")
 			if err != nil {
 				klog.Errorf("Error evicting pod: (%#v)", err)
 				break

@@ -55,7 +55,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 			for _, pod := range pods {
 				if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
 					klog.V(1).Infof("Evicting pod: %v", pod.Name)
-					if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
+					if _, err := podEvictor.EvictPod(ctx, pod, node, "NodeAffinity"); err != nil {
 						klog.Errorf("Error evicting pod: (%#v)", err)
 						break
 					}

@@ -46,7 +46,7 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 			func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
 		) {
 			klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name)
-			if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
+			if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
 				klog.Errorf("Error evicting pod: (%#v)", err)
 				break
 			}

@@ -40,7 +40,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
 			if checkPodsWithAntiAffinityExist(pods[i], pods) {
-				success, err := podEvictor.EvictPod(ctx, pods[i], node)
+				success, err := podEvictor.EvictPod(ctx, pods[i], node, "InterPodAntiAffinity")
 				if err != nil {
 					klog.Errorf("Error evicting pod: (%#v)", err)
 					break

@@ -40,7 +40,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 		klog.V(1).Infof("Processing node: %#v", node.Name)
 		pods := listOldPodsOnNode(ctx, client, node, *strategy.Params.MaxPodLifeTimeSeconds, podEvictor)
 		for _, pod := range pods {
-			success, err := podEvictor.EvictPod(ctx, pod, node)
+			success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
 			if success {
 				klog.V(1).Infof("Evicted pod: %#v because it was created more than %v seconds ago", pod.Name, *strategy.Params.MaxPodLifeTimeSeconds)
 			}

@@ -53,7 +53,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 		} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
 			continue
 		}
-		if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
+		if _, err := podEvictor.EvictPod(ctx, pods[i], node, "TooManyRestarts"); err != nil {
 			klog.Errorf("Error evicting pod: (%#v)", err)
 			break
 		}

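Each strategy above now passes its own name as the eviction reason, so the descheduler's logs identify which strategy triggered each eviction. The sketch below shows the calling convention in compilable form; the Pod, Node, and PodEvictor stubs stand in for the real descheduler and Kubernetes types and are illustrative only.

package main

import (
	"context"
	"fmt"
	"strings"
)

// Illustrative stand-ins for the real descheduler and Kubernetes types.
type Pod struct{ Name, Namespace string }
type Node struct{ Name string }
type PodEvictor struct{}

// EvictPod mimics the patched signature: the trailing variadic reasons
// are optional and, when present, end up in the log line.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *Pod, node *Node, reasons ...string) (bool, error) {
	reason := ""
	if len(reasons) > 0 {
		reason = " (" + strings.Join(reasons, ", ") + ")"
	}
	fmt.Printf("Evicted pod: %#v in namespace %#v%s\n", pod.Name, pod.Namespace, reason)
	return true, nil
}

func main() {
	pe := &PodEvictor{}
	pod := &Pod{Name: "web-0", Namespace: "default"}
	node := &Node{Name: "node-1"}

	// Pre-patch call sites still compile because the reason is optional.
	pe.EvictPod(context.TODO(), pod, node)
	// Strategies follow the convention of passing their own name.
	pe.EvictPod(context.TODO(), pod, node, "TooManyRestarts")
}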