diff --git a/README.md b/README.md
index 128b6187e..f01f8aad7 100644
--- a/README.md
+++ b/README.md
@@ -941,7 +941,7 @@ When the descheduler decides to evict pods from a node, it employs the following
 * [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
 * Pods (static or mirrored pods or standalone pods) not part of an ReplicationController, ReplicaSet(Deployment), StatefulSet, or Job are never evicted because these pods won't be recreated. (Standalone pods in failed status phase can be evicted by setting `evictFailedBarePods: true`)
-* Pods associated with DaemonSets are never evicted.
+* Pods associated with DaemonSets are never evicted (unless `evictDaemonSetPods: true` is set).
 * Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
 * Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
 * In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
diff --git a/charts/descheduler/values.yaml b/charts/descheduler/values.yaml
index e6f2a3e6f..90514c0cd 100644
--- a/charts/descheduler/values.yaml
+++ b/charts/descheduler/values.yaml
@@ -85,6 +85,9 @@ deschedulerPolicy:
   # nodeSelector: "key1=value1,key2=value2"
   # maxNoOfPodsToEvictPerNode: 10
   # maxNoOfPodsToEvictPerNamespace: 10
+  # ignorePvcPods: true
+  # evictLocalStoragePods: true
+  # evictDaemonSetPods: true
   # tracing:
   #   collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
   #   transportCert: ""
diff --git a/docs/deprecated/v1alpha1.md b/docs/deprecated/v1alpha1.md
index 5867a3b49..c55bfe2c9 100644
--- a/docs/deprecated/v1alpha1.md
+++ b/docs/deprecated/v1alpha1.md
@@ -135,6 +135,7 @@ The policy includes a common configuration that applies to all the strategies:
 |------|---------------|-------------|
 | `nodeSelector` | `nil` | limiting the nodes which are processed |
 | `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
+| `evictDaemonSetPods` | `false` | allows eviction of pods associated with DaemonSet resources |
 | `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
 | `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
 | `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
@@ -152,6 +153,7 @@ kind: "DeschedulerPolicy"
 nodeSelector: prod=dev
 evictFailedBarePods: false
 evictLocalStoragePods: true
+evictDaemonSetPods: true
 evictSystemCriticalPods: true
 maxNoOfPodsToEvictPerNode: 40
 ignorePvcPods: false
diff --git a/pkg/api/v1alpha1/conversion.go b/pkg/api/v1alpha1/conversion.go
index 7aaa5e51d..bb15cf978 100644
--- a/pkg/api/v1alpha1/conversion.go
+++ b/pkg/api/v1alpha1/conversion.go
@@ -139,6 +139,14 @@ func V1alpha1ToInternal(
 		}
 	}
 
+	evictDaemonSetPods := false
+	if deschedulerPolicy.EvictDaemonSetPods != nil {
+		evictDaemonSetPods = *deschedulerPolicy.EvictDaemonSetPods
+		if evictDaemonSetPods {
+			klog.V(1).Info("Warning: EvictDaemonSetPods is set to True. This could cause eviction of Kubernetes DaemonSet pods.")
+		}
+	}
+
 	ignorePvcPods := false
 	if deschedulerPolicy.IgnorePVCPods != nil {
 		ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
@@ -193,6 +201,7 @@ func V1alpha1ToInternal(
 				Name: defaultevictor.PluginName,
 				Args: &defaultevictor.DefaultEvictorArgs{
 					EvictLocalStoragePods:   evictLocalStoragePods,
+					EvictDaemonSetPods:      evictDaemonSetPods,
 					EvictSystemCriticalPods: evictSystemCriticalPods,
 					IgnorePvcPods:           ignorePvcPods,
 					EvictFailedBarePods:     evictBarePods,
diff --git a/pkg/api/v1alpha1/types.go b/pkg/api/v1alpha1/types.go
index 5d5b321c7..fb0ccc28a 100644
--- a/pkg/api/v1alpha1/types.go
+++ b/pkg/api/v1alpha1/types.go
@@ -41,6 +41,9 @@ type DeschedulerPolicy struct {
 	// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
 	EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`
 
+	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
+	EvictDaemonSetPods *bool `json:"evictDaemonSetPods,omitempty"`
+
 	// IgnorePVCPods prevents pods with PVCs from being evicted.
 	IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
 
diff --git a/pkg/api/v1alpha1/zz_generated.deepcopy.go b/pkg/api/v1alpha1/zz_generated.deepcopy.go
index eff39bda9..fb57c89f7 100644
--- a/pkg/api/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/api/v1alpha1/zz_generated.deepcopy.go
@@ -57,6 +57,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 		*out = new(bool)
 		**out = **in
 	}
+	if in.EvictDaemonSetPods != nil {
+		in, out := &in.EvictDaemonSetPods, &out.EvictDaemonSetPods
+		*out = new(bool)
+		**out = **in
+	}
 	if in.IgnorePVCPods != nil {
 		in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
 		*out = new(bool)
diff --git a/pkg/apis/componentconfig/types.go b/pkg/apis/componentconfig/types.go
index 328eed213..b83717865 100644
--- a/pkg/apis/componentconfig/types.go
+++ b/pkg/apis/componentconfig/types.go
@@ -51,6 +51,9 @@ type DeschedulerConfiguration struct {
 	// EvictLocalStoragePods allows pods using local storage to be evicted.
 	EvictLocalStoragePods bool
 
+	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
+	EvictDaemonSetPods bool
+
 	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
 	IgnorePVCPods bool
 
diff --git a/pkg/apis/componentconfig/v1alpha1/types.go b/pkg/apis/componentconfig/v1alpha1/types.go
index c8d9d9a44..26a66e777 100644
--- a/pkg/apis/componentconfig/v1alpha1/types.go
+++ b/pkg/apis/componentconfig/v1alpha1/types.go
@@ -51,6 +51,9 @@ type DeschedulerConfiguration struct {
 	// EvictLocalStoragePods allows pods using local storage to be evicted.
 	EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
 
+	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
+	EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
+
 	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
 	IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
 
diff --git a/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go b/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go
index 31110895a..8921ff57d 100644
--- a/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go
+++ b/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go
@@ -67,6 +67,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
 	out.NodeSelector = in.NodeSelector
 	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
 	out.EvictLocalStoragePods = in.EvictLocalStoragePods
+	out.EvictDaemonSetPods = in.EvictDaemonSetPods
 	out.IgnorePVCPods = in.IgnorePVCPods
 	if err := Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
 		return err
@@ -89,6 +90,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
 	out.NodeSelector = in.NodeSelector
 	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
 	out.EvictLocalStoragePods = in.EvictLocalStoragePods
+	out.EvictDaemonSetPods = in.EvictDaemonSetPods
 	out.IgnorePVCPods = in.IgnorePVCPods
 	if err := Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
 		return err
diff --git a/pkg/descheduler/policyconfig_test.go b/pkg/descheduler/policyconfig_test.go
index 2b1f8b6cb..7f415150b 100644
--- a/pkg/descheduler/policyconfig_test.go
+++ b/pkg/descheduler/policyconfig_test.go
@@ -134,6 +134,71 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
 				},
 			},
 		},
+		{
+			description: "convert global policy fields to defaultevictor",
+			policy: &v1alpha1.DeschedulerPolicy{
+				EvictFailedBarePods:     utilpointer.Bool(true),
+				EvictLocalStoragePods:   utilpointer.Bool(true),
+				EvictSystemCriticalPods: utilpointer.Bool(true),
+				EvictDaemonSetPods:      utilpointer.Bool(true),
+				IgnorePVCPods:           utilpointer.Bool(true),
+				Strategies: v1alpha1.StrategyList{
+					removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{
+							Namespaces: &v1alpha1.Namespaces{
+								Exclude: []string{
+									"test2",
+								},
+							},
+						},
+					},
+				},
+			},
+			result: &api.DeschedulerPolicy{
+				Profiles: []api.DeschedulerProfile{
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
+						PluginConfigs: []api.PluginConfig{
+							{
+								Name: defaultevictor.PluginName,
+								Args: &defaultevictor.DefaultEvictorArgs{
+									EvictLocalStoragePods:   true,
+									EvictDaemonSetPods:      true,
+									EvictSystemCriticalPods: true,
+									IgnorePvcPods:           true,
+									EvictFailedBarePods:     true,
+									PriorityThreshold: &api.PriorityThreshold{
+										Value: nil,
+									},
+								},
+							},
+							{
+								Name: removeduplicates.PluginName,
+								Args: &removeduplicates.RemoveDuplicatesArgs{
+									Namespaces: &api.Namespaces{
+										Exclude: []string{
+											"test2",
+										},
+									},
+								},
+							},
+						},
+						Plugins: api.Plugins{
+							Balance: api.PluginSet{
+								Enabled: []string{removeduplicates.PluginName},
+							},
+							Filter: api.PluginSet{
+								Enabled: []string{defaultevictor.PluginName},
+							},
+							PreEvictionFilter: api.PluginSet{
+								Enabled: []string{defaultevictor.PluginName},
+							},
+						},
+					},
+				},
+			},
+		},
 		{
 			description: "convert all strategies",
 			policy: &v1alpha1.DeschedulerPolicy{
@@ -947,6 +1012,7 @@ profiles:
       evictSystemCriticalPods: true
      evictFailedBarePods: true
       evictLocalStoragePods: true
+      evictDaemonSetPods: true
       nodeFit: true
   - name: "RemovePodsHavingTooManyRestarts"
     args:
@@ -968,6 +1034,7 @@ profiles:
 							EvictSystemCriticalPods: true,
 							EvictFailedBarePods:     true,
 							EvictLocalStoragePods:   true,
+							EvictDaemonSetPods:      true,
 							PriorityThreshold:       &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
 							NodeFit:                 true,
 						},
@@ -1099,6 +1166,7 @@ profiles:
       evictSystemCriticalPods: true
       evictFailedBarePods: true
       evictLocalStoragePods: true
+      evictDaemonSetPods: true
       nodeFit: true
   - name: "RemoveFailedPods"
   plugins:
@@ -1123,6 +1191,7 @@ profiles:
 							EvictSystemCriticalPods: true,
 							EvictFailedBarePods:     true,
 							EvictLocalStoragePods:   true,
+							EvictDaemonSetPods:      true,
 							PriorityThreshold:       &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
 							NodeFit:                 true,
 						},
@@ -1161,6 +1230,7 @@ profiles:
       evictSystemCriticalPods: true
       evictFailedBarePods: true
       evictLocalStoragePods: true
+      evictDaemonSetPods: true
       nodeFit: true
   - name: "RemoveFailedPods"
   plugins:
@@ -1179,6 +1249,7 @@ profiles:
 							EvictSystemCriticalPods: true,
 							EvictFailedBarePods:     true,
 							EvictLocalStoragePods:   true,
+							EvictDaemonSetPods:      true,
 							PriorityThreshold:       &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
 							NodeFit:                 true,
 						},
diff --git a/pkg/framework/plugins/defaultevictor/defaultevictor.go b/pkg/framework/plugins/defaultevictor/defaultevictor.go
index 1a458f9b0..f7e635562 100644
--- a/pkg/framework/plugins/defaultevictor/defaultevictor.go
+++ b/pkg/framework/plugins/defaultevictor/defaultevictor.go
@@ -125,6 +125,15 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
 			return nil
 		})
 	}
+	if !defaultEvictorArgs.EvictDaemonSetPods {
+		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
+			ownerRefList := podutil.OwnerRef(pod)
+			if utils.IsDaemonsetPod(ownerRefList) {
+				return fmt.Errorf("pod is a DaemonSet pod and descheduler is not configured with evictDaemonSetPods")
+			}
+			return nil
+		})
+	}
 	if defaultEvictorArgs.IgnorePvcPods {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			if utils.IsPodWithPVC(pod) {
@@ -207,11 +216,6 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
 		return true
 	}
 
-	ownerRefList := podutil.OwnerRef(pod)
-	if utils.IsDaemonsetPod(ownerRefList) {
-		checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
-	}
-
 	if utils.IsMirrorPod(pod) {
 		checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
 	}
diff --git a/pkg/framework/plugins/defaultevictor/defaults.go b/pkg/framework/plugins/defaultevictor/defaults.go
index f93edc323..463ddd518 100644
--- a/pkg/framework/plugins/defaultevictor/defaults.go
+++ b/pkg/framework/plugins/defaultevictor/defaults.go
@@ -31,6 +31,9 @@ func SetDefaults_DefaultEvictorArgs(obj runtime.Object) {
 	if !args.EvictLocalStoragePods {
 		args.EvictLocalStoragePods = false
 	}
+	if !args.EvictDaemonSetPods {
+		args.EvictDaemonSetPods = false
+	}
 	if !args.EvictSystemCriticalPods {
 		args.EvictSystemCriticalPods = false
 	}
diff --git a/pkg/framework/plugins/defaultevictor/defaults_test.go b/pkg/framework/plugins/defaultevictor/defaults_test.go
index 7d21865aa..165935687 100644
--- a/pkg/framework/plugins/defaultevictor/defaults_test.go
+++ b/pkg/framework/plugins/defaultevictor/defaults_test.go
@@ -35,6 +35,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
 			want: &DefaultEvictorArgs{
 				NodeSelector:            "",
 				EvictLocalStoragePods:   false,
+				EvictDaemonSetPods:      false,
 				EvictSystemCriticalPods: false,
 				IgnorePvcPods:           false,
 				EvictFailedBarePods:     false,
@@ -48,6 +49,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
 			in: &DefaultEvictorArgs{
NodeSelector: "NodeSelector", EvictLocalStoragePods: true, + EvictDaemonSetPods: true, EvictSystemCriticalPods: true, IgnorePvcPods: true, EvictFailedBarePods: true, @@ -60,6 +62,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) { want: &DefaultEvictorArgs{ NodeSelector: "NodeSelector", EvictLocalStoragePods: true, + EvictDaemonSetPods: true, EvictSystemCriticalPods: true, IgnorePvcPods: true, EvictFailedBarePods: true, diff --git a/pkg/framework/plugins/defaultevictor/types.go b/pkg/framework/plugins/defaultevictor/types.go index d68c71a70..67f9a55b9 100644 --- a/pkg/framework/plugins/defaultevictor/types.go +++ b/pkg/framework/plugins/defaultevictor/types.go @@ -27,6 +27,7 @@ type DefaultEvictorArgs struct { NodeSelector string `json:"nodeSelector"` EvictLocalStoragePods bool `json:"evictLocalStoragePods"` + EvictDaemonSetPods bool `json:"evictDaemonSetPods"` EvictSystemCriticalPods bool `json:"evictSystemCriticalPods"` IgnorePvcPods bool `json:"ignorePvcPods"` EvictFailedBarePods bool `json:"evictFailedBarePods"` diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 191bd01f3..2ae907480 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -26,6 +26,7 @@ import ( "testing" "time" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -89,6 +90,38 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string } } +// DsByNameContainer returns a DaemonSet with specified name and container +func DsByNameContainer(name, namespace string, labels map[string]string, gracePeriod *int64) *appsv1.DaemonSet { + // Add "name": name to the labels, overwriting if it exists. + labels["name"] = name + if gracePeriod == nil { + gracePeriod = pointer.Int64(0) + } + return &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "DaemonSet", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": name, + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: test.MakePodSpec("", gracePeriod), + }, + }, + } +} + func initializeClient(t *testing.T) (clientset.Interface, informers.SharedInformerFactory, listersv1.NodeLister, podutil.GetPodsAssignedToNodeFunc, chan struct{}) { clientSet, err := client.CreateClient(componentbaseconfig.ClientConnectionConfiguration{Kubeconfig: os.Getenv("KUBECONFIG")}, "") if err != nil { @@ -144,6 +177,7 @@ func runPodLifetimePlugin( priorityClass string, priority *int32, evictCritical bool, + evictDaemonSet bool, maxPodsToEvictPerNamespace *uint, labelSelector *metav1.LabelSelector, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, @@ -182,6 +216,7 @@ func runPodLifetimePlugin( defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{ EvictLocalStoragePods: false, EvictSystemCriticalPods: evictCritical, + EvictDaemonSetPods: evictDaemonSet, IgnorePvcPods: false, EvictFailedBarePods: false, PriorityThreshold: &api.PriorityThreshold{ @@ -451,7 +486,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) { t.Logf("run the plugin to delete pods from %v namespace", rc.Namespace) runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{ Include: []string{rc.Namespace}, - }, "", nil, false, nil, nil, getPodsAssignedToNode) + }, "", nil, false, false, nil, nil, getPodsAssignedToNode) // All pods are supposed to be deleted, 
wait until all the old pods are deleted if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) { @@ -522,7 +557,7 @@ func TestNamespaceConstraintsExclude(t *testing.T) { t.Logf("run the plugin to delete pods from namespaces except the %v namespace", rc.Namespace) runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{ Exclude: []string{rc.Namespace}, - }, "", nil, false, nil, nil, getPodsAssignedToNode) + }, "", nil, false, false, nil, nil, getPodsAssignedToNode) t.Logf("Waiting 10s") time.Sleep(10 * time.Second) @@ -635,9 +670,9 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) { t.Logf("Existing pods: %v", initialPodNames) if isPriorityClass { - runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil, nil, getPodsAssignedToNode) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, false, nil, nil, getPodsAssignedToNode) } else { - runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil, nil, getPodsAssignedToNode) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, false, nil, nil, getPodsAssignedToNode) } // All pods are supposed to be deleted, wait until all pods in the test namespace are terminating @@ -671,6 +706,74 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) { } } +func TestEvictDaemonSetPod(t *testing.T) { + testEvictDaemonSetPod(t, true) +} + +func testEvictDaemonSetPod(t *testing.T, isDaemonSet bool) { + ctx := context.Background() + + clientSet, _, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t) + defer close(stopCh) + + testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}} + if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil { + t.Fatalf("Unable to create ns %v", testNamespace.Name) + } + defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{}) + + daemonSet := DsByNameContainer("test-ds-evictdaemonsetpods", testNamespace.Name, + map[string]string{"test": "evictdaemonsetpods"}, nil) + if _, err := clientSet.AppsV1().DaemonSets(daemonSet.Namespace).Create(ctx, daemonSet, metav1.CreateOptions{}); err != nil { + t.Errorf("Error creating ds %s: %v", daemonSet.Name, err) + } + defer deleteDS(ctx, t, clientSet, daemonSet) + + // wait for a while so all the pods are at least few seconds older + time.Sleep(5 * time.Second) + + podListDaemonSet, err := clientSet.CoreV1().Pods(daemonSet.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(daemonSet.Spec.Template.Labels).String()}) + if err != nil { + t.Fatalf("Unable to list pods: %v", err) + } + + initialPodNames := getPodNames(podListDaemonSet.Items) + sort.Strings(initialPodNames) + t.Logf("Existing pods: %v", initialPodNames) + + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", nil, false, isDaemonSet, nil, nil, getPodsAssignedToNode) + + // All pods are supposed to be deleted, wait until all pods in the test namespace are terminating + t.Logf("All daemonset pods in the test namespace, will be deleted") + if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) { + podList, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, nil + } + currentPodNames := getPodNames(podList.Items) + // validate all pod were 
deleted + if len(intersectStrings(initialPodNames, currentPodNames)) > 0 { + t.Logf("Waiting until %v pods get deleted", intersectStrings(initialPodNames, currentPodNames)) + // check if there's at least one pod not in Terminating state + for _, pod := range podList.Items { + // In case podList contains newly created pods + if len(intersectStrings(initialPodNames, []string{pod.Name})) == 0 { + continue + } + if pod.DeletionTimestamp == nil { + t.Logf("Pod %v not in terminating state", pod.Name) + return false, nil + } + } + t.Logf("All %v pods are terminating", intersectStrings(initialPodNames, currentPodNames)) + } + + return true, nil + }); err != nil { + t.Fatalf("Error waiting for pods to be deleted: %v", err) + } +} + func TestThresholdPriority(t *testing.T) { testPriority(t, false) } @@ -754,10 +857,10 @@ func testPriority(t *testing.T, isPriorityClass bool) { if isPriorityClass { t.Logf("run the plugin to delete pods with priority lower than priority class %s", highPriorityClass.Name) - runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil, nil, getPodsAssignedToNode) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, false, nil, nil, getPodsAssignedToNode) } else { t.Logf("run the plugin to delete pods with priority lower than %d", highPriority) - runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil, nil, getPodsAssignedToNode) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, false, nil, nil, getPodsAssignedToNode) } t.Logf("Waiting 10s") @@ -861,7 +964,7 @@ func TestPodLabelSelector(t *testing.T) { t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames) t.Logf("run the plugin to delete pods with label test:podlifetime-evict") - runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", nil, false, false, nil, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode) t.Logf("Waiting 10s") time.Sleep(10 * time.Second) @@ -961,7 +1064,7 @@ func TestEvictAnnotation(t *testing.T) { t.Logf("Existing pods: %v", initialPodNames) t.Log("Running PodLifetime plugin") - runPodLifetimePlugin(ctx, t, clientSet, nodeLister, nil, "", nil, false, nil, nil, getPodsAssignedToNode) + runPodLifetimePlugin(ctx, t, clientSet, nodeLister, nil, "", nil, false, false, nil, nil, getPodsAssignedToNode) if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()}) @@ -1027,7 +1130,7 @@ func TestPodLifeTimeOldestEvicted(t *testing.T) { t.Log("Running PodLifetime plugin with maxPodsToEvictPerNamespace=1 to ensure only the oldest pod is evicted") var maxPodsToEvictPerNamespace uint = 1 - runPodLifetimePlugin(ctx, t, clientSet, nodeLister, nil, "", nil, false, &maxPodsToEvictPerNamespace, nil, getPodsAssignedToNode) + runPodLifetimePlugin(ctx, t, clientSet, nodeLister, nil, "", nil, false, false, &maxPodsToEvictPerNamespace, nil, getPodsAssignedToNode) t.Log("Finished PodLifetime plugin") t.Logf("Wait for terminating pod to disappear") @@ -1123,6 +1226,43 @@ func 
waitForTerminatingPodsToDisappear(ctx context.Context, t *testing.T, client } } +func deleteDS(ctx context.Context, t *testing.T, clientSet clientset.Interface, ds *appsv1.DaemonSet) { + // adds nodeselector to avoid any nodes by setting an unused label + dsDeepCopy := ds.DeepCopy() + dsDeepCopy.Spec.Template.Spec.NodeSelector = map[string]string{ + "avoid-all-nodes": "true", + } + + if _, err := clientSet.AppsV1().DaemonSets(dsDeepCopy.Namespace).Update(ctx, dsDeepCopy, metav1.UpdateOptions{}); err != nil { + t.Fatalf("Error updating daemonset %v", err) + } + + if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { + podList, _ := clientSet.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(ds.Spec.Template.Labels).String()}) + t.Logf("Waiting for %v DS pods to disappear, still %v remaining", ds.Name, len(podList.Items)) + if len(podList.Items) > 0 { + return false, nil + } + return true, nil + }); err != nil { + t.Fatalf("Error waiting for ds pods to disappear: %v", err) + } + + if err := clientSet.AppsV1().DaemonSets(ds.Namespace).Delete(ctx, ds.Name, metav1.DeleteOptions{}); err != nil { + t.Fatalf("Error deleting ds %v", err) + } + + if err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) { + _, err := clientSet.AppsV1().DaemonSets(ds.Namespace).Get(ctx, ds.Name, metav1.GetOptions{}) + if err != nil && strings.Contains(err.Error(), "not found") { + return true, nil + } + return false, nil + }); err != nil { + t.Fatalf("Error deleting ds %v", err) + } +} + func deleteRC(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) { // set number of replicas to 0 rcdeepcopy := rc.DeepCopy()
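For reviewers: a minimal v1alpha2 policy that exercises the new flag might look like the sketch below. Only `evictDaemonSetPods` itself comes from this change; the profile name and the `PodLifeTime` pairing are illustrative, and the flag defaults to `false`, preserving the old "never evict DaemonSet pods" behavior.

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: daemonset-eviction          # illustrative profile name
  pluginConfig:
  - name: "DefaultEvictor"
    args:
      evictDaemonSetPods: true      # new flag; defaults to false
  - name: "PodLifeTime"
    args:
      maxPodLifeTimeSeconds: 86400  # any enabled strategy is still gated by the evictor
  plugins:
    deschedule:
      enabled:
      - "PodLifeTime"
```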