Allow eviction of DaemonSet pods (#1342)
* feat: evictDaemonSetPods flag for evictors
* test: evictDaemonSetPods unit and e2e
* docs: evictDaemonSetPods
@@ -941,7 +941,7 @@ When the descheduler decides to evict pods from a node, it employs the following
* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
* Pods (static or mirrored pods or standalone pods) not part of a ReplicationController, ReplicaSet (Deployment), StatefulSet, or Job are
never evicted because these pods won't be recreated. (Standalone pods in failed status phase can be evicted by setting `evictFailedBarePods: true`)
-* Pods associated with DaemonSets are never evicted.
+* Pods associated with DaemonSets are never evicted (unless `evictDaemonSetPods: true` is set).
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have the same priority,
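As an aside (illustrative, not part of this commit): the evictor recognizes a DaemonSet pod by its owner references, which is also how the constraint added later in this diff works via podutil.OwnerRef and utils.IsDaemonsetPod. A minimal, self-contained Go sketch of the same check using only core API types (the helper name isDaemonSetPod is hypothetical):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isDaemonSetPod is a hypothetical helper mirroring utils.IsDaemonsetPod:
// a pod counts as a DaemonSet pod when one of its owner references is a DaemonSet.
func isDaemonSetPod(pod *v1.Pod) bool {
	for _, ref := range pod.OwnerReferences {
		if ref.Kind == "DaemonSet" {
			return true
		}
	}
	return false
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "fluentd-abc12",
		OwnerReferences: []metav1.OwnerReference{
			{APIVersion: "apps/v1", Kind: "DaemonSet", Name: "fluentd"},
		},
	}}
	// Prints true: such a pod is skipped unless evictDaemonSetPods is enabled.
	fmt.Println(isDaemonSetPod(pod))
}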
@@ -85,6 +85,9 @@ deschedulerPolicy:
# nodeSelector: "key1=value1,key2=value2"
# maxNoOfPodsToEvictPerNode: 10
# maxNoOfPodsToEvictPerNamespace: 10
# ignorePvcPods: true
# evictLocalStoragePods: true
# evictDaemonSetPods: true
# tracing:
#   collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
#   transportCert: ""
@@ -135,6 +135,7 @@ The policy includes a common configuration that applies to all the strategies:
|------|---------------|-------------|
| `nodeSelector` | `nil` | limiting the nodes which are processed |
| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
| `evictDaemonSetPods` | `false` | allows eviction of pods associated with DaemonSet resources |
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
@@ -152,6 +153,7 @@ kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictFailedBarePods: false
evictLocalStoragePods: true
evictDaemonSetPods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false
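For orientation (a sketch, not part of the diff): these top-level v1alpha1 policy fields are folded into the DefaultEvictor plugin arguments by the conversion code shown in the next hunks. Assuming the usual import path for the defaultevictor plugin package, the example policy above corresponds roughly to args like this:

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
)

func main() {
	// Values mirror the README example above; nodeSelector and
	// maxNoOfPodsToEvictPerNode stay on the policy itself, not on the evictor args.
	args := &defaultevictor.DefaultEvictorArgs{
		EvictFailedBarePods:     false,
		EvictLocalStoragePods:   true,
		EvictDaemonSetPods:      true,
		EvictSystemCriticalPods: true,
		IgnorePvcPods:           false,
	}
	fmt.Printf("%+v\n", args)
}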
@@ -139,6 +139,14 @@ func V1alpha1ToInternal(
		}
	}

	evictDaemonSetPods := false
	if deschedulerPolicy.EvictDaemonSetPods != nil {
		evictDaemonSetPods = *deschedulerPolicy.EvictDaemonSetPods
		if evictDaemonSetPods {
			klog.V(1).Info("Warning: EvictDaemonSetPods is set to True. This could cause eviction of Kubernetes DaemonSet pods.")
		}
	}

	ignorePvcPods := false
	if deschedulerPolicy.IgnorePVCPods != nil {
		ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
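Side note (illustrative, not from the commit): the repeated nil-check-then-dereference above exists because the v1alpha1 fields are *bool, so an unset field can be told apart from an explicit false. A hypothetical helper condensing that pattern:

package main

import "fmt"

// boolOrDefault is a hypothetical helper, not part of the descheduler code base:
// it returns *p when the policy set the field and def when the field was left nil.
func boolOrDefault(p *bool, def bool) bool {
	if p != nil {
		return *p
	}
	return def
}

func main() {
	var unset *bool
	explicit := true
	fmt.Println(boolOrDefault(unset, false))     // false: field omitted from the policy
	fmt.Println(boolOrDefault(&explicit, false)) // true: evictDaemonSetPods: true
}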
@@ -193,6 +201,7 @@ func V1alpha1ToInternal(
				Name: defaultevictor.PluginName,
				Args: &defaultevictor.DefaultEvictorArgs{
					EvictLocalStoragePods: evictLocalStoragePods,
					EvictDaemonSetPods: evictDaemonSetPods,
					EvictSystemCriticalPods: evictSystemCriticalPods,
					IgnorePvcPods: ignorePvcPods,
					EvictFailedBarePods: evictBarePods,
@@ -41,6 +41,9 @@ type DeschedulerPolicy struct {
	// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
	EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`

	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
	EvictDaemonSetPods *bool `json:"evictDaemonSetPods,omitempty"`

	// IgnorePVCPods prevents pods with PVCs from being evicted.
	IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
pkg/api/v1alpha1/zz_generated.deepcopy.go (generated, 5 lines changed)
@@ -57,6 +57,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
		*out = new(bool)
		**out = **in
	}
	if in.EvictDaemonSetPods != nil {
		in, out := &in.EvictDaemonSetPods, &out.EvictDaemonSetPods
		*out = new(bool)
		**out = **in
	}
	if in.IgnorePVCPods != nil {
		in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
		*out = new(bool)
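For context (a sketch, not from the commit) on why the generated deep copy allocates a fresh bool instead of copying the pointer: without new(bool), the copy would alias the original, and a later mutation of one policy would silently change the other.

package main

import "fmt"

type policy struct {
	EvictDaemonSetPods *bool
}

func main() {
	val := true
	orig := policy{EvictDaemonSetPods: &val}

	// Shallow copy: both structs share the same *bool.
	shallow := orig

	// Deep copy, as DeepCopyInto does: allocate a new bool and copy the value.
	deep := policy{EvictDaemonSetPods: new(bool)}
	*deep.EvictDaemonSetPods = *orig.EvictDaemonSetPods

	*orig.EvictDaemonSetPods = false
	fmt.Println(*shallow.EvictDaemonSetPods) // false: aliased, follows the original
	fmt.Println(*deep.EvictDaemonSetPods)    // true: independent copy
}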
@@ -51,6 +51,9 @@ type DeschedulerConfiguration struct {
	// EvictLocalStoragePods allows pods using local storage to be evicted.
	EvictLocalStoragePods bool

	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
	EvictDaemonSetPods bool

	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
	IgnorePVCPods bool
@@ -51,6 +51,9 @@ type DeschedulerConfiguration struct {
	// EvictLocalStoragePods allows pods using local storage to be evicted.
	EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`

	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
	EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`

	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
	IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
@@ -67,6 +67,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
	out.NodeSelector = in.NodeSelector
	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
	out.EvictLocalStoragePods = in.EvictLocalStoragePods
	out.EvictDaemonSetPods = in.EvictDaemonSetPods
	out.IgnorePVCPods = in.IgnorePVCPods
	if err := Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
		return err
@@ -89,6 +90,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
	out.NodeSelector = in.NodeSelector
	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
	out.EvictLocalStoragePods = in.EvictLocalStoragePods
	out.EvictDaemonSetPods = in.EvictDaemonSetPods
	out.IgnorePVCPods = in.IgnorePVCPods
	if err := Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
		return err
@@ -134,6 +134,71 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
				},
			},
		},
		{
			description: "convert global policy fields to defaultevictor",
			policy: &v1alpha1.DeschedulerPolicy{
				EvictFailedBarePods: utilpointer.Bool(true),
				EvictLocalStoragePods: utilpointer.Bool(true),
				EvictSystemCriticalPods: utilpointer.Bool(true),
				EvictDaemonSetPods: utilpointer.Bool(true),
				IgnorePVCPods: utilpointer.Bool(true),
				Strategies: v1alpha1.StrategyList{
					removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							Namespaces: &v1alpha1.Namespaces{
								Exclude: []string{
									"test2",
								},
							},
						},
					},
				},
			},
			result: &api.DeschedulerPolicy{
				Profiles: []api.DeschedulerProfile{
					{
						Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
						PluginConfigs: []api.PluginConfig{
							{
								Name: defaultevictor.PluginName,
								Args: &defaultevictor.DefaultEvictorArgs{
									EvictLocalStoragePods: true,
									EvictDaemonSetPods: true,
									EvictSystemCriticalPods: true,
									IgnorePvcPods: true,
									EvictFailedBarePods: true,
									PriorityThreshold: &api.PriorityThreshold{
										Value: nil,
									},
								},
							},
							{
								Name: removeduplicates.PluginName,
								Args: &removeduplicates.RemoveDuplicatesArgs{
									Namespaces: &api.Namespaces{
										Exclude: []string{
											"test2",
										},
									},
								},
							},
						},
						Plugins: api.Plugins{
							Balance: api.PluginSet{
								Enabled: []string{removeduplicates.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
				},
			},
		},
		{
			description: "convert all strategies",
			policy: &v1alpha1.DeschedulerPolicy{
@@ -947,6 +1012,7 @@ profiles:
evictSystemCriticalPods: true
evictFailedBarePods: true
evictLocalStoragePods: true
evictDaemonSetPods: true
nodeFit: true
- name: "RemovePodsHavingTooManyRestarts"
args:
@@ -968,6 +1034,7 @@ profiles:
				EvictSystemCriticalPods: true,
				EvictFailedBarePods: true,
				EvictLocalStoragePods: true,
				EvictDaemonSetPods: true,
				PriorityThreshold: &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
				NodeFit: true,
			},
@@ -1099,6 +1166,7 @@ profiles:
evictSystemCriticalPods: true
evictFailedBarePods: true
evictLocalStoragePods: true
evictDaemonSetPods: true
nodeFit: true
- name: "RemoveFailedPods"
plugins:
@@ -1123,6 +1191,7 @@ profiles:
				EvictSystemCriticalPods: true,
				EvictFailedBarePods: true,
				EvictLocalStoragePods: true,
				EvictDaemonSetPods: true,
				PriorityThreshold: &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
				NodeFit: true,
			},
@@ -1161,6 +1230,7 @@ profiles:
evictSystemCriticalPods: true
evictFailedBarePods: true
evictLocalStoragePods: true
evictDaemonSetPods: true
nodeFit: true
- name: "RemoveFailedPods"
plugins:
@@ -1179,6 +1249,7 @@ profiles:
				EvictSystemCriticalPods: true,
				EvictFailedBarePods: true,
				EvictLocalStoragePods: true,
				EvictDaemonSetPods: true,
				PriorityThreshold: &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
				NodeFit: true,
			},
@@ -125,6 +125,15 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
			return nil
		})
	}
	if !defaultEvictorArgs.EvictDaemonSetPods {
		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
			ownerRefList := podutil.OwnerRef(pod)
			if utils.IsDaemonsetPod(ownerRefList) {
				return fmt.Errorf("pod is related to daemonset and descheduler is not configured with evictDaemonSetPods")
			}
			return nil
		})
	}
	if defaultEvictorArgs.IgnorePvcPods {
		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
			if utils.IsPodWithPVC(pod) {
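For context (a sketch under the assumption that the evictor simply walks its constraints list; the exact Filter wiring lives in the surrounding file, not in this hunk): each registered constraint is a func(*v1.Pod) error, and a pod is treated as evictable only when every constraint returns nil.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// checkConstraints is a hypothetical illustration of how a slice like ev.constraints
// is consumed: collect every violation and treat the pod as non-evictable if any fail.
func checkConstraints(pod *v1.Pod, constraints []func(*v1.Pod) error) (bool, []error) {
	var errs []error
	for _, c := range constraints {
		if err := c(pod); err != nil {
			errs = append(errs, err)
		}
	}
	return len(errs) == 0, errs
}

func main() {
	denyAll := func(pod *v1.Pod) error { return fmt.Errorf("pod %s is not evictable", pod.Name) }
	ok, errs := checkConstraints(&v1.Pod{}, []func(*v1.Pod) error{denyAll})
	fmt.Println(ok, errs)
}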
@@ -207,11 +216,6 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
		return true
	}

	ownerRefList := podutil.OwnerRef(pod)
	if utils.IsDaemonsetPod(ownerRefList) {
		checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
	}

	if utils.IsMirrorPod(pod) {
		checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
	}
@@ -31,6 +31,9 @@ func SetDefaults_DefaultEvictorArgs(obj runtime.Object) {
	if !args.EvictLocalStoragePods {
		args.EvictLocalStoragePods = false
	}
	if !args.EvictDaemonSetPods {
		args.EvictDaemonSetPods = false
	}
	if !args.EvictSystemCriticalPods {
		args.EvictSystemCriticalPods = false
	}
@@ -35,6 +35,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
			want: &DefaultEvictorArgs{
				NodeSelector: "",
				EvictLocalStoragePods: false,
				EvictDaemonSetPods: false,
				EvictSystemCriticalPods: false,
				IgnorePvcPods: false,
				EvictFailedBarePods: false,
@@ -48,6 +49,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
			in: &DefaultEvictorArgs{
				NodeSelector: "NodeSelector",
				EvictLocalStoragePods: true,
				EvictDaemonSetPods: true,
				EvictSystemCriticalPods: true,
				IgnorePvcPods: true,
				EvictFailedBarePods: true,
@@ -60,6 +62,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
			want: &DefaultEvictorArgs{
				NodeSelector: "NodeSelector",
				EvictLocalStoragePods: true,
				EvictDaemonSetPods: true,
				EvictSystemCriticalPods: true,
				IgnorePvcPods: true,
				EvictFailedBarePods: true,
@@ -27,6 +27,7 @@ type DefaultEvictorArgs struct {

	NodeSelector            string `json:"nodeSelector"`
	EvictLocalStoragePods   bool   `json:"evictLocalStoragePods"`
	EvictDaemonSetPods      bool   `json:"evictDaemonSetPods"`
	EvictSystemCriticalPods bool   `json:"evictSystemCriticalPods"`
	IgnorePvcPods           bool   `json:"ignorePvcPods"`
	EvictFailedBarePods     bool   `json:"evictFailedBarePods"`
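As a side illustration (not part of the commit): the json tags above are what the plugin args in a policy file decode into. A minimal sketch using a local mirror of the tagged fields so the example is self-contained:

package main

import (
	"encoding/json"
	"fmt"
)

// defaultEvictorArgs is a local copy of the tagged fields above, for illustration only.
type defaultEvictorArgs struct {
	NodeSelector            string `json:"nodeSelector"`
	EvictLocalStoragePods   bool   `json:"evictLocalStoragePods"`
	EvictDaemonSetPods      bool   `json:"evictDaemonSetPods"`
	EvictSystemCriticalPods bool   `json:"evictSystemCriticalPods"`
	IgnorePvcPods           bool   `json:"ignorePvcPods"`
	EvictFailedBarePods     bool   `json:"evictFailedBarePods"`
}

func main() {
	raw := []byte(`{"evictDaemonSetPods": true, "evictLocalStoragePods": true}`)
	var args defaultEvictorArgs
	if err := json.Unmarshal(raw, &args); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", args) // fields not present in the JSON stay false
}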
@@ -26,6 +26,7 @@ import (
	"testing"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	"k8s.io/apimachinery/pkg/api/resource"
@@ -89,6 +90,38 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
	}
}

// DsByNameContainer returns a DaemonSet with specified name and container
func DsByNameContainer(name, namespace string, labels map[string]string, gracePeriod *int64) *appsv1.DaemonSet {
	// Add "name": name to the labels, overwriting if it exists.
	labels["name"] = name
	if gracePeriod == nil {
		gracePeriod = pointer.Int64(0)
	}
	return &appsv1.DaemonSet{
		TypeMeta: metav1.TypeMeta{
			Kind: "DaemonSet",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Namespace: namespace,
		},
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"name": name,
				},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: test.MakePodSpec("", gracePeriod),
			},
		},
	}
}

func initializeClient(t *testing.T) (clientset.Interface, informers.SharedInformerFactory, listersv1.NodeLister, podutil.GetPodsAssignedToNodeFunc, chan struct{}) {
	clientSet, err := client.CreateClient(componentbaseconfig.ClientConnectionConfiguration{Kubeconfig: os.Getenv("KUBECONFIG")}, "")
	if err != nil {
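The DsByNameContainer helper above is exercised by the new e2e test further down; for quick reference, creation and cleanup look like this (taken from that test, with ctx, t, clientSet and testNamespace assumed in scope):

	ds := DsByNameContainer("test-ds-evictdaemonsetpods", testNamespace.Name,
		map[string]string{"test": "evictdaemonsetpods"}, nil)
	if _, err := clientSet.AppsV1().DaemonSets(ds.Namespace).Create(ctx, ds, metav1.CreateOptions{}); err != nil {
		t.Errorf("Error creating ds %s: %v", ds.Name, err)
	}
	defer deleteDS(ctx, t, clientSet, ds)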
@@ -144,6 +177,7 @@ func runPodLifetimePlugin(
	priorityClass string,
	priority *int32,
	evictCritical bool,
	evictDaemonSet bool,
	maxPodsToEvictPerNamespace *uint,
	labelSelector *metav1.LabelSelector,
	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
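Because the test helper takes the new evictDaemonSet flag positionally (right after evictCritical), every caller below gains one extra argument. For orientation, an annotated form of the call used by the new e2e test (comments added here, not in the commit):

	runPodLifetimePlugin(ctx, t, clientSet, nodeInformer,
		nil,   // namespaces
		"",    // priorityClass
		nil,   // priority
		false, // evictCritical
		true,  // evictDaemonSet (isDaemonSet in the new e2e test)
		nil,   // maxPodsToEvictPerNamespace
		nil,   // labelSelector
		getPodsAssignedToNode)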
@@ -182,6 +216,7 @@ func runPodLifetimePlugin(
	defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
		EvictLocalStoragePods: false,
		EvictSystemCriticalPods: evictCritical,
		EvictDaemonSetPods: evictDaemonSet,
		IgnorePvcPods: false,
		EvictFailedBarePods: false,
		PriorityThreshold: &api.PriorityThreshold{
@@ -451,7 +486,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) {
	t.Logf("run the plugin to delete pods from %v namespace", rc.Namespace)
	runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
		Include: []string{rc.Namespace},
-	}, "", nil, false, nil, nil, getPodsAssignedToNode)
+	}, "", nil, false, false, nil, nil, getPodsAssignedToNode)

	// All pods are supposed to be deleted, wait until all the old pods are deleted
	if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
@@ -522,7 +557,7 @@ func TestNamespaceConstraintsExclude(t *testing.T) {
	t.Logf("run the plugin to delete pods from namespaces except the %v namespace", rc.Namespace)
	runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
		Exclude: []string{rc.Namespace},
-	}, "", nil, false, nil, nil, getPodsAssignedToNode)
+	}, "", nil, false, false, nil, nil, getPodsAssignedToNode)

	t.Logf("Waiting 10s")
	time.Sleep(10 * time.Second)
@@ -635,9 +670,9 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
	t.Logf("Existing pods: %v", initialPodNames)

	if isPriorityClass {
-		runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil, nil, getPodsAssignedToNode)
+		runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, false, nil, nil, getPodsAssignedToNode)
	} else {
-		runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil, nil, getPodsAssignedToNode)
+		runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, false, nil, nil, getPodsAssignedToNode)
	}

	// All pods are supposed to be deleted, wait until all pods in the test namespace are terminating
@@ -671,6 +706,74 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
	}
}

func TestEvictDaemonSetPod(t *testing.T) {
	testEvictDaemonSetPod(t, true)
}

func testEvictDaemonSetPod(t *testing.T, isDaemonSet bool) {
	ctx := context.Background()

	clientSet, _, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
	defer close(stopCh)

	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Unable to create ns %v", testNamespace.Name)
	}
	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})

	daemonSet := DsByNameContainer("test-ds-evictdaemonsetpods", testNamespace.Name,
		map[string]string{"test": "evictdaemonsetpods"}, nil)
	if _, err := clientSet.AppsV1().DaemonSets(daemonSet.Namespace).Create(ctx, daemonSet, metav1.CreateOptions{}); err != nil {
		t.Errorf("Error creating ds %s: %v", daemonSet.Name, err)
	}
	defer deleteDS(ctx, t, clientSet, daemonSet)

	// wait for a while so all the pods are at least a few seconds old
	time.Sleep(5 * time.Second)

	podListDaemonSet, err := clientSet.CoreV1().Pods(daemonSet.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(daemonSet.Spec.Template.Labels).String()})
	if err != nil {
		t.Fatalf("Unable to list pods: %v", err)
	}

	initialPodNames := getPodNames(podListDaemonSet.Items)
	sort.Strings(initialPodNames)
	t.Logf("Existing pods: %v", initialPodNames)

	runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", nil, false, isDaemonSet, nil, nil, getPodsAssignedToNode)

	// All pods are supposed to be deleted, wait until all pods in the test namespace are terminating
	t.Logf("All daemonset pods in the test namespace, will be deleted")
	if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
		podList, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, nil
		}
		currentPodNames := getPodNames(podList.Items)
		// validate that all pods were deleted
		if len(intersectStrings(initialPodNames, currentPodNames)) > 0 {
			t.Logf("Waiting until %v pods get deleted", intersectStrings(initialPodNames, currentPodNames))
			// check if there's at least one pod not in Terminating state
			for _, pod := range podList.Items {
				// In case podList contains newly created pods
				if len(intersectStrings(initialPodNames, []string{pod.Name})) == 0 {
					continue
				}
				if pod.DeletionTimestamp == nil {
					t.Logf("Pod %v not in terminating state", pod.Name)
					return false, nil
				}
			}
			t.Logf("All %v pods are terminating", intersectStrings(initialPodNames, currentPodNames))
		}

		return true, nil
	}); err != nil {
		t.Fatalf("Error waiting for pods to be deleted: %v", err)
	}
}

func TestThresholdPriority(t *testing.T) {
	testPriority(t, false)
}
@@ -754,10 +857,10 @@ func testPriority(t *testing.T, isPriorityClass bool) {

	if isPriorityClass {
		t.Logf("run the plugin to delete pods with priority lower than priority class %s", highPriorityClass.Name)
-		runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil, nil, getPodsAssignedToNode)
+		runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, false, nil, nil, getPodsAssignedToNode)
	} else {
		t.Logf("run the plugin to delete pods with priority lower than %d", highPriority)
-		runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil, nil, getPodsAssignedToNode)
+		runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, false, nil, nil, getPodsAssignedToNode)
	}

	t.Logf("Waiting 10s")
@@ -861,7 +964,7 @@ func TestPodLabelSelector(t *testing.T) {
	t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames)

	t.Logf("run the plugin to delete pods with label test:podlifetime-evict")
-	runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode)
+	runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", nil, false, false, nil, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode)

	t.Logf("Waiting 10s")
	time.Sleep(10 * time.Second)
@@ -961,7 +1064,7 @@ func TestEvictAnnotation(t *testing.T) {
	t.Logf("Existing pods: %v", initialPodNames)

	t.Log("Running PodLifetime plugin")
-	runPodLifetimePlugin(ctx, t, clientSet, nodeLister, nil, "", nil, false, nil, nil, getPodsAssignedToNode)
+	runPodLifetimePlugin(ctx, t, clientSet, nodeLister, nil, "", nil, false, false, nil, nil, getPodsAssignedToNode)

	if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
		podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
@@ -1027,7 +1130,7 @@ func TestPodLifeTimeOldestEvicted(t *testing.T) {

	t.Log("Running PodLifetime plugin with maxPodsToEvictPerNamespace=1 to ensure only the oldest pod is evicted")
	var maxPodsToEvictPerNamespace uint = 1
-	runPodLifetimePlugin(ctx, t, clientSet, nodeLister, nil, "", nil, false, &maxPodsToEvictPerNamespace, nil, getPodsAssignedToNode)
+	runPodLifetimePlugin(ctx, t, clientSet, nodeLister, nil, "", nil, false, false, &maxPodsToEvictPerNamespace, nil, getPodsAssignedToNode)
	t.Log("Finished PodLifetime plugin")

	t.Logf("Wait for terminating pod to disappear")
@@ -1123,6 +1226,43 @@ func waitForTerminatingPodsToDisappear(ctx context.Context, t *testing.T, client
	}
}

func deleteDS(ctx context.Context, t *testing.T, clientSet clientset.Interface, ds *appsv1.DaemonSet) {
	// add a nodeSelector with an unused label so the DaemonSet pods are removed from every node
	dsDeepCopy := ds.DeepCopy()
	dsDeepCopy.Spec.Template.Spec.NodeSelector = map[string]string{
		"avoid-all-nodes": "true",
	}

	if _, err := clientSet.AppsV1().DaemonSets(dsDeepCopy.Namespace).Update(ctx, dsDeepCopy, metav1.UpdateOptions{}); err != nil {
		t.Fatalf("Error updating daemonset %v", err)
	}

	if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
		podList, _ := clientSet.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(ds.Spec.Template.Labels).String()})
		t.Logf("Waiting for %v DS pods to disappear, still %v remaining", ds.Name, len(podList.Items))
		if len(podList.Items) > 0 {
			return false, nil
		}
		return true, nil
	}); err != nil {
		t.Fatalf("Error waiting for ds pods to disappear: %v", err)
	}

	if err := clientSet.AppsV1().DaemonSets(ds.Namespace).Delete(ctx, ds.Name, metav1.DeleteOptions{}); err != nil {
		t.Fatalf("Error deleting ds %v", err)
	}

	if err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		_, err := clientSet.AppsV1().DaemonSets(ds.Namespace).Get(ctx, ds.Name, metav1.GetOptions{})
		if err != nil && strings.Contains(err.Error(), "not found") {
			return true, nil
		}
		return false, nil
	}); err != nil {
		t.Fatalf("Error deleting ds %v", err)
	}
}

func deleteRC(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
	// set number of replicas to 0
	rcdeepcopy := rc.DeepCopy()