diff --git a/pkg/descheduler/descheduler_test.go b/pkg/descheduler/descheduler_test.go
index 150df4058..883f8f0b5 100644
--- a/pkg/descheduler/descheduler_test.go
+++ b/pkg/descheduler/descheduler_test.go
@@ -16,10 +16,10 @@ import (
 )
 
 func TestTaintsUpdated(t *testing.T) {
-	n1 := test.BuildTestNode("n1", 2000, 3000, 10)
-	n2 := test.BuildTestNode("n2", 2000, 3000, 10)
+	n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
+	n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
 
-	p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name)
+	p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
 	p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
 		{},
 	}
diff --git a/pkg/descheduler/evictions/evictions_test.go b/pkg/descheduler/evictions/evictions_test.go
index fb1e942cc..a8624b3c7 100644
--- a/pkg/descheduler/evictions/evictions_test.go
+++ b/pkg/descheduler/evictions/evictions_test.go
@@ -17,17 +17,18 @@ limitations under the License.
 package evictions
 
 import (
+	"testing"
+
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"sigs.k8s.io/descheduler/test"
-	"testing"
 )
 
 func TestEvictPod(t *testing.T) {
-	node1 := test.BuildTestNode("node1", 1000, 2000, 9)
-	pod1 := test.BuildTestPod("p1", 400, 0, "node1")
+	node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
+	pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
 	tests := []struct {
 		description string
 		node        *v1.Node
@@ -46,7 +47,7 @@ func TestEvictPod(t *testing.T) {
 			description: "test pod eviction - pod absent",
 			node:        node1,
 			pod:         pod1,
-			pods:        []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1"), *test.BuildTestPod("p3", 450, 0, "node1")},
+			pods:        []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1", nil), *test.BuildTestPod("p3", 450, 0, "node1", nil)},
 			want:        true,
 		},
 	}
diff --git a/pkg/descheduler/node/node_test.go b/pkg/descheduler/node/node_test.go
index e17827e73..1e3ea9cf8 100644
--- a/pkg/descheduler/node/node_test.go
+++ b/pkg/descheduler/node/node_test.go
@@ -27,14 +27,14 @@ import (
 )
 
 func TestReadyNodes(t *testing.T) {
-	node1 := test.BuildTestNode("node2", 1000, 2000, 9)
-	node2 := test.BuildTestNode("node3", 1000, 2000, 9)
+	node1 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
+	node2 := test.BuildTestNode("node3", 1000, 2000, 9, nil)
 	node2.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}}
-	node3 := test.BuildTestNode("node4", 1000, 2000, 9)
+	node3 := test.BuildTestNode("node4", 1000, 2000, 9, nil)
 	node3.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}}
-	node4 := test.BuildTestNode("node5", 1000, 2000, 9)
+	node4 := test.BuildTestNode("node5", 1000, 2000, 9, nil)
 	node4.Spec.Unschedulable = true
-	node5 := test.BuildTestNode("node6", 1000, 2000, 9)
+	node5 := test.BuildTestNode("node6", 1000, 2000, 9, nil)
 	node5.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}
 
 	if !IsReady(node1) {
@@ -56,9 +56,9 @@ func TestReadyNodes(t *testing.T) {
 }
 
 func TestReadyNodesWithNodeSelector(t *testing.T) {
-	node1 := test.BuildTestNode("node1", 1000, 2000, 9)
+	node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
 	node1.Labels = map[string]string{"type": "compute"}
-	node2 := test.BuildTestNode("node2", 1000, 2000, 9)
+	node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
 	node2.Labels = map[string]string{"type": "infra"}
 
 	fakeClient := fake.NewSimpleClientset(node1, node2)
diff --git a/pkg/descheduler/pod/pods_test.go b/pkg/descheduler/pod/pods_test.go
index eee220678..43e5e7afb 100644
--- a/pkg/descheduler/pod/pods_test.go
+++ b/pkg/descheduler/pod/pods_test.go
@@ -26,7 +26,7 @@ import (
 )
 
 func TestIsEvictable(t *testing.T) {
-	n1 := test.BuildTestNode("node1", 1000, 2000, 13)
+	n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
 	type testCase struct {
 		pod                   *v1.Pod
 		runBefore             func(*v1.Pod)
@@ -36,14 +36,14 @@ func TestIsEvictable(t *testing.T) {
 
 	testCases := []testCase{
 		{
-			pod: test.BuildTestPod("p1", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 			},
 			evictLocalStoragePods: false,
 			result:                true,
 		}, {
-			pod: test.BuildTestPod("p2", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
 				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -51,14 +51,14 @@ func TestIsEvictable(t *testing.T) {
 			evictLocalStoragePods: false,
 			result:                true,
 		}, {
-			pod: test.BuildTestPod("p3", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
 			},
 			evictLocalStoragePods: false,
 			result:                true,
 		}, {
-			pod: test.BuildTestPod("p4", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
 				pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
@@ -66,7 +66,7 @@ func TestIsEvictable(t *testing.T) {
 			evictLocalStoragePods: false,
 			result:                true,
 		}, {
-			pod: test.BuildTestPod("p5", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 				pod.Spec.Volumes = []v1.Volume{
@@ -83,7 +83,7 @@ func TestIsEvictable(t *testing.T) {
 			evictLocalStoragePods: false,
 			result:                false,
 		}, {
-			pod: test.BuildTestPod("p6", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 				pod.Spec.Volumes = []v1.Volume{
@@ -100,7 +100,7 @@ func TestIsEvictable(t *testing.T) {
 			evictLocalStoragePods: true,
 			result:                true,
 		}, {
-			pod: test.BuildTestPod("p7", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
 				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -118,14 +118,14 @@ func TestIsEvictable(t *testing.T) {
 			evictLocalStoragePods: false,
 			result:                true,
 		}, {
-			pod: test.BuildTestPod("p8", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
 			},
 			evictLocalStoragePods: false,
 			result:                false,
 		}, {
-			pod: test.BuildTestPod("p9", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
 				pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
@@ -133,14 +133,14 @@ func TestIsEvictable(t *testing.T) {
 			evictLocalStoragePods: false,
 			result:                true,
 		}, {
-			pod: test.BuildTestPod("p10", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.Annotations = test.GetMirrorPodAnnotation()
 			},
 			evictLocalStoragePods: false,
 			result:                false,
 		}, {
-			pod: test.BuildTestPod("p11", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				pod.Annotations = test.GetMirrorPodAnnotation()
 				pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
@@ -148,7 +148,7 @@ func TestIsEvictable(t *testing.T) {
 			evictLocalStoragePods: false,
 			result:                true,
 		}, {
-			pod: test.BuildTestPod("p12", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				priority := utils.SystemCriticalPriority
 				pod.Spec.Priority = &priority
@@ -156,7 +156,7 @@ func TestIsEvictable(t *testing.T) {
 			evictLocalStoragePods: false,
 			result:                false,
 		}, {
-			pod: test.BuildTestPod("p13", 400, 0, n1.Name),
+			pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
 			runBefore: func(pod *v1.Pod) {
 				priority := utils.SystemCriticalPriority
 				pod.Spec.Priority = &priority
@@ -179,15 +179,15 @@ func TestIsEvictable(t *testing.T) {
 	}
 }
 
 func TestPodTypes(t *testing.T) {
-	n1 := test.BuildTestNode("node1", 1000, 2000, 9)
-	p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
+	n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
+	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
 
 	// These won't be evicted.
-	p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
-	p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
-	p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
-	p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
-	p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
+	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, nil)
+	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, nil)
+	p4 := test.BuildTestPod("p4", 400, 0, n1.Name, nil)
+	p5 := test.BuildTestPod("p5", 400, 0, n1.Name, nil)
+	p6 := test.BuildTestPod("p6", 400, 0, n1.Name, nil)
 	p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
diff --git a/pkg/descheduler/strategies/duplicates_test.go b/pkg/descheduler/strategies/duplicates_test.go
index db1208222..987b1a7d3 100644
--- a/pkg/descheduler/strategies/duplicates_test.go
+++ b/pkg/descheduler/strategies/duplicates_test.go
@@ -30,23 +30,23 @@ import (
 
 func TestFindDuplicatePods(t *testing.T) {
 	// first setup pods
-	node := test.BuildTestNode("n1", 2000, 3000, 10)
-	p1 := test.BuildTestPod("p1", 100, 0, node.Name)
+	node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
+	p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
 	p1.Namespace = "dev"
-	p2 := test.BuildTestPod("p2", 100, 0, node.Name)
+	p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
 	p2.Namespace = "dev"
-	p3 := test.BuildTestPod("p3", 100, 0, node.Name)
+	p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
 	p3.Namespace = "dev"
-	p4 := test.BuildTestPod("p4", 100, 0, node.Name)
-	p5 := test.BuildTestPod("p5", 100, 0, node.Name)
-	p6 := test.BuildTestPod("p6", 100, 0, node.Name)
-	p7 := test.BuildTestPod("p7", 100, 0, node.Name)
+	p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
+	p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
+	p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
+	p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
 	p7.Namespace = "kube-system"
-	p8 := test.BuildTestPod("p8", 100, 0, node.Name)
+	p8 := test.BuildTestPod("p8", 100, 0, node.Name, nil)
 	p8.Namespace = "test"
-	p9 := test.BuildTestPod("p9", 100, 0, node.Name)
+	p9 := test.BuildTestPod("p9", 100, 0, node.Name, nil)
 	p9.Namespace = "test"
-	p10 := test.BuildTestPod("p10", 100, 0, node.Name)
+	p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil)
 	p10.Namespace = "test"
 
 	// ### Evictable Pods ###
diff --git a/pkg/descheduler/strategies/lownodeutilization_test.go b/pkg/descheduler/strategies/lownodeutilization_test.go
index 0bdcda76c..0d582da13 100644
--- a/pkg/descheduler/strategies/lownodeutilization_test.go
+++ b/pkg/descheduler/strategies/lownodeutilization_test.go
@@ -39,237 +39,238 @@ import (
 	"sigs.k8s.io/descheduler/test"
 )
 
-// TODO: Make this table driven.
-func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
-	var thresholds = make(api.ResourceThresholds)
-	var targetThresholds = make(api.ResourceThresholds)
-	thresholds[v1.ResourceCPU] = 30
-	thresholds[v1.ResourcePods] = 30
-	targetThresholds[v1.ResourceCPU] = 50
-	targetThresholds[v1.ResourcePods] = 50
+var (
+	lowPriority  = int32(0)
+	highPriority = int32(10000)
+)
 
-	n1 := test.BuildTestNode("n1", 4000, 3000, 9)
-	n2 := test.BuildTestNode("n2", 4000, 3000, 10)
-	n3 := test.BuildTestNode("n3", 4000, 3000, 10)
-	// Making n3 node unschedulable so that it won't counted in lowUtilized nodes list.
-	n3.Spec.Unschedulable = true
-	p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
-	p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
-	p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
-	p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
-	p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
+func setRSOwnerRef(pod *v1.Pod)     { pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList() }
+func setDSOwnerRef(pod *v1.Pod)     { pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList() }
+func setNormalOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() }
+func setHighPriority(pod *v1.Pod)   { pod.Spec.Priority = &highPriority }
+func setLowPriority(pod *v1.Pod)    { pod.Spec.Priority = &lowPriority }
 
-	// These won't be evicted.
-	p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
-	p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
-	p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
+func TestLowNodeUtilization(t *testing.T) {
+	n1NodeName := "n1"
+	n2NodeName := "n2"
+	n3NodeName := "n3"
 
-	p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	// The following 4 pods won't get evicted.
-	// A daemonset.
-	p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
-	// A pod with local storage.
-	p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p7.Spec.Volumes = []v1.Volume{
+	testCases := []struct {
+		name                         string
+		thresholds, targetThresholds api.ResourceThresholds
+		nodes                        map[string]*v1.Node
+		pods                         map[string]*v1.PodList
+		expectedPodsEvicted          int
+	}{
 		{
-			Name: "sample",
-			VolumeSource: v1.VolumeSource{
-				HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
-				EmptyDir: &v1.EmptyDirVolumeSource{
-					SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+			name: "without priorities",
+			thresholds: api.ResourceThresholds{
+				v1.ResourceCPU:  30,
+				v1.ResourcePods: 30,
 			},
+			targetThresholds: api.ResourceThresholds{
+				v1.ResourceCPU:  50,
+				v1.ResourcePods: 50,
+			},
+			nodes: map[string]*v1.Node{
+				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
+				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, func(node *v1.Node) {
+					node.Spec.Unschedulable = true
+				}),
+			},
+			pods: map[string]*v1.PodList{
+				n1NodeName: {
+					Items: []v1.Pod{
+						*test.BuildTestPod("p1", 400, 0, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p2", 400, 0, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p3", 400, 0, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p4", 400, 0, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p5", 400, 0, n1NodeName, setRSOwnerRef),
+						// These won't be evicted.
+						*test.BuildTestPod("p6", 400, 0, n1NodeName, setDSOwnerRef),
+						*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
+							// A pod with local storage.
+							setNormalOwnerRef(pod)
+							pod.Spec.Volumes = []v1.Volume{
+								{
+									Name: "sample",
+									VolumeSource: v1.VolumeSource{
+										HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
+										EmptyDir: &v1.EmptyDirVolumeSource{
+											SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+									},
+								},
+							}
+							// A Mirror Pod.
+							pod.Annotations = test.GetMirrorPodAnnotation()
+						}),
+						*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
+							// A Critical Pod.
+							pod.Namespace = "kube-system"
+							priority := utils.SystemCriticalPriority
+							pod.Spec.Priority = &priority
+						}),
+					},
+				},
+				n2NodeName: {
+					Items: []v1.Pod{
+						*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+					},
+				},
+				n3NodeName: {},
+			},
+			expectedPodsEvicted: 3,
+		},
+		{
+			name: "with priorities",
+			thresholds: api.ResourceThresholds{
+				v1.ResourceCPU:  30,
+				v1.ResourcePods: 30,
+			},
+			targetThresholds: api.ResourceThresholds{
+				v1.ResourceCPU:  50,
+				v1.ResourcePods: 50,
+			},
+			nodes: map[string]*v1.Node{
+				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
+				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, func(node *v1.Node) {
+					node.Spec.Unschedulable = true
+				}),
+			},
+			pods: map[string]*v1.PodList{
+				n1NodeName: {
+					Items: []v1.Pod{
+						*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
+							setRSOwnerRef(pod)
+							setHighPriority(pod)
+						}),
+						*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
+							setRSOwnerRef(pod)
+							setHighPriority(pod)
+						}),
+						*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
+							setRSOwnerRef(pod)
+							setHighPriority(pod)
+						}),
+						*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
+							setRSOwnerRef(pod)
+							setHighPriority(pod)
+						}),
+						*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
+							setRSOwnerRef(pod)
+							setLowPriority(pod)
+						}),
+						// These won't be evicted.
+						*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
+							setDSOwnerRef(pod)
+							setHighPriority(pod)
+						}),
+						*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
+							// A pod with local storage.
+							setNormalOwnerRef(pod)
+							setLowPriority(pod)
+							pod.Spec.Volumes = []v1.Volume{
+								{
+									Name: "sample",
+									VolumeSource: v1.VolumeSource{
+										HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
+										EmptyDir: &v1.EmptyDirVolumeSource{
+											SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+									},
+								},
+							}
+							// A Mirror Pod.
+							pod.Annotations = test.GetMirrorPodAnnotation()
+						}),
+						*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
+							// A Critical Pod.
+							pod.Namespace = "kube-system"
+							priority := utils.SystemCriticalPriority
+							pod.Spec.Priority = &priority
+						}),
+					},
+				},
+				n2NodeName: {
+					Items: []v1.Pod{
+						*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+					},
+				},
+				n3NodeName: {},
+			},
+			expectedPodsEvicted: 3,
+		},
 	}
-	// A Mirror Pod.
-	p7.Annotations = test.GetMirrorPodAnnotation()
-	// A Critical Pod.
-	p8.Namespace = "kube-system"
-	priority := utils.SystemCriticalPriority
-	p8.Spec.Priority = &priority
-	p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
-	p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	fakeClient := &fake.Clientset{}
-	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-		list := action.(core.ListAction)
-		fieldString := list.GetListRestrictions().Fields.String()
-		if strings.Contains(fieldString, "n1") {
-			return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
-		}
-		if strings.Contains(fieldString, "n2") {
-			return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
-		}
-		if strings.Contains(fieldString, "n3") {
-			return true, &v1.PodList{Items: []v1.Pod{}}, nil
-		}
-		return true, nil, fmt.Errorf("Failed to list: %v", list)
-	})
-	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
-		getAction := action.(core.GetAction)
-		switch getAction.GetName() {
-		case n1.Name:
-			return true, n1, nil
-		case n2.Name:
-			return true, n2, nil
-		case n3.Name:
-			return true, n3, nil
-		}
-		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
-	})
-	expectedPodsEvicted := 3
-	npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
-	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
-	if len(lowNodes) != 1 {
-		t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
-	}
-	npe := utils.NodePodEvictedCount{}
-	npe[n1] = 0
-	npe[n2] = 0
-	npe[n3] = 0
-	podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, false, npe)
-	if expectedPodsEvicted != podsEvicted {
-		t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
-	}
-}
+	for _, test := range testCases {
+		t.Run(test.name, func(t *testing.T) {
+			fakeClient := &fake.Clientset{}
+			fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
+				list := action.(core.ListAction)
+				fieldString := list.GetListRestrictions().Fields.String()
+				if strings.Contains(fieldString, n1NodeName) {
+					return true, test.pods[n1NodeName], nil
+				}
+				if strings.Contains(fieldString, n2NodeName) {
+					return true, test.pods[n2NodeName], nil
+				}
+				if strings.Contains(fieldString, n3NodeName) {
+					return true, test.pods[n3NodeName], nil
+				}
+				return true, nil, fmt.Errorf("Failed to list: %v", list)
+			})
+			fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
+				getAction := action.(core.GetAction)
+				if node, exists := test.nodes[getAction.GetName()]; exists {
+					return true, node, nil
+				}
+				return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
+			})
-// TODO: Make this table driven.
-func TestLowNodeUtilizationWithPriorities(t *testing.T) {
-	var thresholds = make(api.ResourceThresholds)
-	var targetThresholds = make(api.ResourceThresholds)
-	thresholds[v1.ResourceCPU] = 30
-	thresholds[v1.ResourcePods] = 30
-	targetThresholds[v1.ResourceCPU] = 50
-	targetThresholds[v1.ResourcePods] = 50
-	lowPriority := int32(0)
-	highPriority := int32(10000)
-	n1 := test.BuildTestNode("n1", 4000, 3000, 9)
-	n2 := test.BuildTestNode("n2", 4000, 3000, 10)
-	n3 := test.BuildTestNode("n3", 4000, 3000, 10)
-	// Making n3 node unschedulable so that it won't counted in lowUtilized nodes list.
-	n3.Spec.Unschedulable = true
-	p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
-	p1.Spec.Priority = &highPriority
-	p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
-	p2.Spec.Priority = &highPriority
-	p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
-	p3.Spec.Priority = &highPriority
-	p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
-	p4.Spec.Priority = &highPriority
-	p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
-	p5.Spec.Priority = &lowPriority
 
+			var nodes []*v1.Node
+			npe := utils.NodePodEvictedCount{}
+			for _, node := range test.nodes {
+				nodes = append(nodes, node)
+				npe[node] = 0
+			}
-	// These won't be evicted.
-	p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
-	p6.Spec.Priority = &highPriority
-	p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
-	p7.Spec.Priority = &lowPriority
-	p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
-	p8.Spec.Priority = &lowPriority
-
-	p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	// The following 4 pods won't get evicted.
-	// A daemonset.
-	p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
-	// A pod with local storage.
-	p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p7.Spec.Volumes = []v1.Volume{
-		{
-			Name: "sample",
-			VolumeSource: v1.VolumeSource{
-				HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
-				EmptyDir: &v1.EmptyDirVolumeSource{
-					SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
-			},
-		},
+			npm := createNodePodsMap(fakeClient, nodes)
+			lowNodes, targetNodes := classifyNodes(npm, test.thresholds, test.targetThresholds, false)
+			if len(lowNodes) != 1 {
+				t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
+			}
+			podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, test.targetThresholds, false, test.expectedPodsEvicted, false, npe)
+			if test.expectedPodsEvicted != podsEvicted {
+				t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
+			}
+		})
 	}
-	// A Mirror Pod.
-	p7.Annotations = test.GetMirrorPodAnnotation()
-	// A Critical Pod.
-	p8.Namespace = "kube-system"
-	priority := utils.SystemCriticalPriority
-	p8.Spec.Priority = &priority
-	p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
-	p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
-	fakeClient := &fake.Clientset{}
-	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-		list := action.(core.ListAction)
-		fieldString := list.GetListRestrictions().Fields.String()
-		if strings.Contains(fieldString, "n1") {
-			return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
-		}
-		if strings.Contains(fieldString, "n2") {
-			return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
-		}
-		if strings.Contains(fieldString, "n3") {
-			return true, &v1.PodList{Items: []v1.Pod{}}, nil
-		}
-		return true, nil, fmt.Errorf("Failed to list: %v", list)
-	})
-	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
-		getAction := action.(core.GetAction)
-		switch getAction.GetName() {
-		case n1.Name:
-			return true, n1, nil
-		case n2.Name:
-			return true, n2, nil
-		case n3.Name:
-			return true, n3, nil
-		}
-		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
-	})
-	expectedPodsEvicted := 3
-	npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
-	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
-	if len(lowNodes) != 1 {
-		t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
-	}
-	npe := utils.NodePodEvictedCount{}
-	npe[n1] = 0
-	npe[n2] = 0
-	npe[n3] = 0
-	podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, false, npe)
-	if expectedPodsEvicted != podsEvicted {
-		t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
-	}
-}
 
 func TestSortPodsByPriority(t *testing.T) {
-	n1 := test.BuildTestNode("n1", 4000, 3000, 9)
-	lowPriority := int32(0)
-	highPriority := int32(10000)
-	p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
-	p1.Spec.Priority = &lowPriority
+	n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
+
+	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, setLowPriority)
 	// BestEffort
-	p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
-	p2.Spec.Priority = &highPriority
-
+	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, setHighPriority)
 	p2.Spec.Containers[0].Resources.Requests = nil
 	p2.Spec.Containers[0].Resources.Limits = nil
 
 	// Burstable
-	p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
-	p3.Spec.Priority = &highPriority
+	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, setHighPriority)
 
 	// Guaranteed
-	p4 := test.BuildTestPod("p4", 400, 100, n1.Name)
-	p4.Spec.Priority = &highPriority
+	p4 := test.BuildTestPod("p4", 400, 100, n1.Name, setHighPriority)
 	p4.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
 	p4.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
 
 	// Best effort with nil priorities.
-	p5 := test.BuildTestPod("p5", 400, 100, n1.Name)
+	p5 := test.BuildTestPod("p5", 400, 100, n1.Name, nil)
 	p5.Spec.Priority = nil
-	p6 := test.BuildTestPod("p6", 400, 100, n1.Name)
+
+	p6 := test.BuildTestPod("p6", 400, 100, n1.Name, nil)
 	p6.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
 	p6.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
 	p6.Spec.Priority = nil
@@ -393,9 +394,9 @@ func TestWithTaints(t *testing.T) {
 		},
 	}
 
-	n1 := test.BuildTestNode("n1", 2000, 3000, 10)
-	n2 := test.BuildTestNode("n2", 1000, 3000, 10)
-	n3 := test.BuildTestNode("n3", 1000, 3000, 10)
+	n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
+	n2 := test.BuildTestNode("n2", 1000, 3000, 10, nil)
+	n3 := test.BuildTestNode("n3", 1000, 3000, 10, nil)
 	n3withTaints := n3.DeepCopy()
 	n3withTaints.Spec.Taints = []v1.Taint{
 		{
@@ -405,7 +406,7 @@
 		},
 	}
 
-	podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name)
+	podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, setRSOwnerRef)
 	podThatToleratesTaint.Spec.Tolerations = []v1.Toleration{
 		{
 			Key:   "key",
@@ -424,16 +425,16 @@
 			nodes: []*v1.Node{n1, n2, n3},
 			pods: []*v1.Pod{
 				//Node 1 pods
-				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name),
+				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
 				// Node 2 pods
-				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name),
+				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, setRSOwnerRef),
 			},
 			evictionsExpected: 1,
 		},
@@ -442,16 +443,16 @@
 			nodes: []*v1.Node{n1, n3withTaints},
 			pods: []*v1.Pod{
 				//Node 1 pods
-				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name),
+				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
 				// Node 3 pods
-				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name),
+				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
 			},
 			evictionsExpected: 0,
 		},
@@ -460,16 +461,16 @@
 			nodes: []*v1.Node{n1, n3withTaints},
 			pods: []*v1.Pod{
 				//Node 1 pods
-				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name),
-				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name),
+				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
 				podThatToleratesTaint,
 				// Node 3 pods
-				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name),
+				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
 			},
 			evictionsExpected: 1,
 		},
@@ -483,7 +484,6 @@
 		}
 
 		for _, pod := range item.pods {
-			pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
 			objs = append(objs, pod)
 		}
 
diff --git a/pkg/descheduler/strategies/node_affinity_test.go b/pkg/descheduler/strategies/node_affinity_test.go
index 0c6dc412d..8fab3de50 100644
--- a/pkg/descheduler/strategies/node_affinity_test.go
+++ b/pkg/descheduler/strategies/node_affinity_test.go
@@ -42,17 +42,17 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 	nodeLabelKey := "kubernetes.io/desiredNode"
 	nodeLabelValue := "yes"
 
-	nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10)
+	nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10, nil)
 	nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
 
-	nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10)
+	nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10, nil)
 
-	unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10)
+	unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10, nil)
 	nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
 	unschedulableNodeWithLabels.Spec.Unschedulable = true
 
 	addPodsToNode := func(node *v1.Node) []v1.Pod {
-		podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name)
+		podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
 		podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
 			NodeAffinity: &v1.NodeAffinity{
 				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
@@ -73,8 +73,8 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			},
 		}
 
-		pod1 := test.BuildTestPod("pod1", 100, 0, node.Name)
-		pod2 := test.BuildTestPod("pod2", 100, 0, node.Name)
+		pod1 := test.BuildTestPod("pod1", 100, 0, node.Name, nil)
+		pod2 := test.BuildTestPod("pod2", 100, 0, node.Name, nil)
 
 		podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 		pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
diff --git a/pkg/descheduler/strategies/node_taint_test.go b/pkg/descheduler/strategies/node_taint_test.go
index 7e467fb02..726f59ddf 100644
--- a/pkg/descheduler/strategies/node_taint_test.go
+++ b/pkg/descheduler/strategies/node_taint_test.go
@@ -42,17 +42,17 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
 
 func TestDeletePodsViolatingNodeTaints(t *testing.T) {
-	node1 := test.BuildTestNode("n1", 2000, 3000, 10)
+	node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
 	node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
-	node2 := test.BuildTestNode("n2", 2000, 3000, 10)
+	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
 	node1 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
 
-	p1 := test.BuildTestPod("p1", 100, 0, node1.Name)
-	p2 := test.BuildTestPod("p2", 100, 0, node1.Name)
-	p3 := test.BuildTestPod("p3", 100, 0, node1.Name)
-	p4 := test.BuildTestPod("p4", 100, 0, node1.Name)
-	p5 := test.BuildTestPod("p5", 100, 0, node1.Name)
-	p6 := test.BuildTestPod("p6", 100, 0, node1.Name)
+	p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
+	p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
+	p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
+	p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
+	p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
+	p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
 
 	p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 	p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -60,11 +60,11 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 	p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 	p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 	p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p7 := test.BuildTestPod("p7", 100, 0, node2.Name)
-	p8 := test.BuildTestPod("p8", 100, 0, node2.Name)
-	p9 := test.BuildTestPod("p9", 100, 0, node2.Name)
-	p10 := test.BuildTestPod("p10", 100, 0, node2.Name)
-	p11 := test.BuildTestPod("p11", 100, 0, node2.Name)
+	p7 := test.BuildTestPod("p7", 100, 0, node2.Name, nil)
+	p8 := test.BuildTestPod("p8", 100, 0, node2.Name, nil)
+	p9 := test.BuildTestPod("p9", 100, 0, node2.Name, nil)
+	p10 := test.BuildTestPod("p10", 100, 0, node2.Name, nil)
+	p11 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
 	p11.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 
 	// The following 4 pods won't get evicted.
diff --git a/pkg/descheduler/strategies/pod_antiaffinity_test.go b/pkg/descheduler/strategies/pod_antiaffinity_test.go
index c718d5b34..d651e2f0e 100644
--- a/pkg/descheduler/strategies/pod_antiaffinity_test.go
+++ b/pkg/descheduler/strategies/pod_antiaffinity_test.go
@@ -29,11 +29,11 @@ import (
 )
 
 func TestPodAntiAffinity(t *testing.T) {
-	node := test.BuildTestNode("n1", 2000, 3000, 10)
-	p1 := test.BuildTestPod("p1", 100, 0, node.Name)
-	p2 := test.BuildTestPod("p2", 100, 0, node.Name)
-	p3 := test.BuildTestPod("p3", 100, 0, node.Name)
-	p4 := test.BuildTestPod("p4", 100, 0, node.Name)
+	node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
+	p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
+	p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
+	p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
+	p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
 	p2.Labels = map[string]string{"foo": "bar"}
 	p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 	p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
diff --git a/test/test_utils.go b/test/test_utils.go
index a848b0553..95680cf11 100644
--- a/test/test_utils.go
+++ b/test/test_utils.go
@@ -25,7 +25,7 @@ import (
 )
 
 // BuildTestPod creates a test pod with given parameters.
-func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod {
+func BuildTestPod(name string, cpu int64, memory int64, nodeName string, apply func(*v1.Pod)) *v1.Pod {
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "default",
@@ -50,7 +50,9 @@ func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod
 	if memory >= 0 {
 		pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
 	}
-
+	if apply != nil {
+		apply(pod)
+	}
 	return pod
 }
 
@@ -85,7 +87,7 @@ func GetDaemonSetOwnerRefList() []metav1.OwnerReference {
 }
 
 // BuildTestNode creates a node with specified capacity.
-func BuildTestNode(name string, millicpu int64, mem int64, pods int64) *v1.Node {
+func BuildTestNode(name string, millicpu int64, mem int64, pods int64, apply func(*v1.Node)) *v1.Node {
 	node := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -109,5 +111,8 @@ func BuildTestNode(name string, millicpu int64, mem int64, pods int64) *v1.Node
 			},
 		},
 	}
+	if apply != nil {
+		apply(node)
+	}
 	return node
 }
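
For reference, a minimal usage sketch of the new apply hooks on the test builders (not part of the patch above; the unschedulable flag and owner-reference tweak are illustrative, mirroring what the table-driven test does):

	// Build a node and a pod, applying per-test tweaks inline instead of
	// mutating the returned objects after the fact.
	node := test.BuildTestNode("n1", 2000, 3000, 10, func(node *v1.Node) {
		node.Spec.Unschedulable = true // same pattern used in TestLowNodeUtilization
	})
	pod := test.BuildTestPod("p1", 400, 0, node.Name, func(pod *v1.Pod) {
		pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
	})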