diff --git a/pkg/descheduler/evictions/evictions.go b/pkg/descheduler/evictions/evictions.go
index 1eaff53af..0a7110ad6 100644
--- a/pkg/descheduler/evictions/evictions.go
+++ b/pkg/descheduler/evictions/evictions.go
@@ -19,6 +19,7 @@ package evictions
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1beta1"
@@ -124,7 +125,11 @@ func (pe *PodEvictor) TotalEvicted() int {
 // EvictPod returns non-nil error only when evicting a pod on a node is not
 // possible (due to maxPodsToEvict constraint). Success is true when the pod
 // is evicted on the server side.
-func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node) (bool, error) {
+func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, reasons ...string) (bool, error) {
+	reason := ""
+	if len(reasons) > 0 {
+		reason = " (" + strings.Join(reasons, ", ") + ")"
+	}
 	if pe.maxPodsToEvict > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvict {
 		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvict, node.Name)
 	}
@@ -132,15 +137,15 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node)
 	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
 	if err != nil {
 		// err is used only for logging purposes
-		klog.Errorf("Error evicting pod: %#v in namespace %#v (%#v)", pod.Name, pod.Namespace, err)
+		klog.Errorf("Error evicting pod: %#v in namespace %#v%s: %#v", pod.Name, pod.Namespace, reason, err)
 		return false, nil
 	}
 
 	pe.nodepodCount[node]++
 	if pe.dryRun {
-		klog.V(1).Infof("Evicted pod in dry run mode: %#v in namespace %#v", pod.Name, pod.Namespace)
+		klog.V(1).Infof("Evicted pod in dry run mode: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
 	} else {
-		klog.V(1).Infof("Evicted pod: %#v in namespace %#v", pod.Name, pod.Namespace)
+		klog.V(1).Infof("Evicted pod: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
 	}
 	return true, nil
 }
diff --git a/pkg/descheduler/strategies/duplicates.go b/pkg/descheduler/strategies/duplicates.go
index 568a6c99a..7840b2ce7 100644
--- a/pkg/descheduler/strategies/duplicates.go
+++ b/pkg/descheduler/strategies/duplicates.go
@@ -101,7 +101,7 @@ func RemoveDuplicatePods(
 		}
 
 		for _, pod := range duplicatePods {
-			if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
+			if _, err := podEvictor.EvictPod(ctx, pod, node, "RemoveDuplicatePods"); err != nil {
 				klog.Errorf("Error evicting pod: (%#v)", err)
 				break
 			}
diff --git a/pkg/descheduler/strategies/lownodeutilization.go b/pkg/descheduler/strategies/lownodeutilization.go
index 26d609cd9..b16f84e61 100644
--- a/pkg/descheduler/strategies/lownodeutilization.go
+++ b/pkg/descheduler/strategies/lownodeutilization.go
@@ -289,7 +289,7 @@ func evictPods(
 			cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
 			mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)
 
-			success, err := podEvictor.EvictPod(ctx, pod, node)
+			success, err := podEvictor.EvictPod(ctx, pod, node, "LowNodeUtilization")
 			if err != nil {
 				klog.Errorf("Error evicting pod: (%#v)", err)
 				break
diff --git a/pkg/descheduler/strategies/node_affinity.go b/pkg/descheduler/strategies/node_affinity.go
index 4a489ddcf..ad8315021 100644
--- a/pkg/descheduler/strategies/node_affinity.go
+++ b/pkg/descheduler/strategies/node_affinity.go
@@ -55,7 +55,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 			for _, pod := range pods {
 				if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
 					klog.V(1).Infof("Evicting pod: %v", pod.Name)
-					if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
+					if _, err := podEvictor.EvictPod(ctx, pod, node, "NodeAffinity"); err != nil {
 						klog.Errorf("Error evicting pod: (%#v)", err)
 						break
 					}
diff --git a/pkg/descheduler/strategies/node_taint.go b/pkg/descheduler/strategies/node_taint.go
index 690e41338..9f78c10f6 100644
--- a/pkg/descheduler/strategies/node_taint.go
+++ b/pkg/descheduler/strategies/node_taint.go
@@ -46,7 +46,7 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 			func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
 		) {
 			klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name)
-			if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
+			if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
 				klog.Errorf("Error evicting pod: (%#v)", err)
 				break
 			}
diff --git a/pkg/descheduler/strategies/pod_antiaffinity.go b/pkg/descheduler/strategies/pod_antiaffinity.go
index fc43419bd..15c96b36a 100644
--- a/pkg/descheduler/strategies/pod_antiaffinity.go
+++ b/pkg/descheduler/strategies/pod_antiaffinity.go
@@ -40,14 +40,13 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
 			if checkPodsWithAntiAffinityExist(pods[i], pods) {
-				success, err := podEvictor.EvictPod(ctx, pods[i], node)
+				success, err := podEvictor.EvictPod(ctx, pods[i], node, "InterPodAntiAffinity")
 				if err != nil {
 					klog.Errorf("Error evicting pod: (%#v)", err)
 					break
 				}
 
 				if success {
-					klog.V(1).Infof("Evicted pod: %#v\n because of existing anti-affinity", pods[i].Name)
 					// Since the current pod is evicted all other pods which have anti-affinity with this
 					// pod need not be evicted.
 					// Update pods.
diff --git a/pkg/descheduler/strategies/pod_lifetime.go b/pkg/descheduler/strategies/pod_lifetime.go
index b1287abbf..9204b46ba 100644
--- a/pkg/descheduler/strategies/pod_lifetime.go
+++ b/pkg/descheduler/strategies/pod_lifetime.go
@@ -40,7 +40,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 		klog.V(1).Infof("Processing node: %#v", node.Name)
 		pods := listOldPodsOnNode(ctx, client, node, *strategy.Params.MaxPodLifeTimeSeconds, podEvictor)
 		for _, pod := range pods {
-			success, err := podEvictor.EvictPod(ctx, pod, node)
+			success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
 			if success {
 				klog.V(1).Infof("Evicted pod: %#v because it was created more than %v seconds ago", pod.Name, *strategy.Params.MaxPodLifeTimeSeconds)
 			}
diff --git a/pkg/descheduler/strategies/toomanyrestarts.go b/pkg/descheduler/strategies/toomanyrestarts.go
index 5991e0393..dd359bcc6 100644
--- a/pkg/descheduler/strategies/toomanyrestarts.go
+++ b/pkg/descheduler/strategies/toomanyrestarts.go
@@ -53,7 +53,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 			} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
 				continue
 			}
-			if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
+			if _, err := podEvictor.EvictPod(ctx, pods[i], node, "TooManyRestarts"); err != nil {
 				klog.Errorf("Error evicting pod: (%#v)", err)
 				break
 			}
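
Usage sketch (illustrative, not part of the patch): how a caller uses the extended EvictPod after this change. Because reasons is variadic, every existing call site stays source-compatible. The helper name evictWithReason and the "MyStrategy" reason tag below are placeholders; the rest follows the signatures touched above.

	// Illustrative only: the variadic reasons are joined and appended,
	// parenthesized, to the evictor's own log lines, e.g.
	//   Evicted pod: "nginx-1" in namespace "default" (MyStrategy)
	package strategies

	import (
		"context"

		v1 "k8s.io/api/core/v1"
		"k8s.io/klog"

		"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	)

	// evictWithReason is a hypothetical helper wrapping the new call shape.
	func evictWithReason(ctx context.Context, podEvictor *evictions.PodEvictor, pod *v1.Pod, node *v1.Node) bool {
		success, err := podEvictor.EvictPod(ctx, pod, node, "MyStrategy")
		if err != nil {
			// Per the EvictPod contract, a non-nil error only means the
			// maxPodsToEvict limit for this node was reached.
			klog.Errorf("Error evicting pod: (%#v)", err)
			return false
		}
		return success
	}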