From 7457626f62c19fc0e94ba1ff5e37170ade1012be Mon Sep 17 00:00:00 2001
From: lixiang
Date: Tue, 16 Jun 2020 19:17:28 +0800
Subject: [PATCH 1/3] Move helper funcs to testutil

---
 .../strategies/lownodeutilization_test.go     | 208 ++++++++----------
 test/test_utils.go                            |  42 ++++
 2 files changed, 135 insertions(+), 115 deletions(-)

diff --git a/pkg/descheduler/strategies/lownodeutilization_test.go b/pkg/descheduler/strategies/lownodeutilization_test.go
index bb346ebfc..4aa6ece2f 100644
--- a/pkg/descheduler/strategies/lownodeutilization_test.go
+++ b/pkg/descheduler/strategies/lownodeutilization_test.go
@@ -30,7 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
@@ -45,30 +45,6 @@ var (
 	highPriority = int32(10000)
 )
 
-func setRSOwnerRef(pod *v1.Pod)     { pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList() }
-func setDSOwnerRef(pod *v1.Pod)     { pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList() }
-func setNormalOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() }
-func setHighPriority(pod *v1.Pod)   { pod.Spec.Priority = &highPriority }
-func setLowPriority(pod *v1.Pod)    { pod.Spec.Priority = &lowPriority }
-func setNodeUnschedulable(node *v1.Node) { node.Spec.Unschedulable = true }
-
-func makeBestEffortPod(pod *v1.Pod) {
-	pod.Spec.Containers[0].Resources.Requests = nil
-	pod.Spec.Containers[0].Resources.Requests = nil
-	pod.Spec.Containers[0].Resources.Limits = nil
-	pod.Spec.Containers[0].Resources.Limits = nil
-}
-
-func makeBurstablePod(pod *v1.Pod) {
-	pod.Spec.Containers[0].Resources.Limits = nil
-	pod.Spec.Containers[0].Resources.Limits = nil
-}
-
-func makeGuaranteedPod(pod *v1.Pod) {
-	pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
-	pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]
-}
-
 func TestLowNodeUtilization(t *testing.T) {
 	ctx := context.Background()
 	n1NodeName := "n1"
@@ -99,19 +75,19 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes: map[string]*v1.Node{
 				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
 				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
 			pods: map[string]*v1.PodList{
 				n1NodeName: {
 					Items: []v1.Pod{
 						// These won't be evicted.
-						*test.BuildTestPod("p1", 400, 0, n1NodeName, setDSOwnerRef),
-						*test.BuildTestPod("p2", 400, 0, n1NodeName, setDSOwnerRef),
-						*test.BuildTestPod("p3", 400, 0, n1NodeName, setDSOwnerRef),
-						*test.BuildTestPod("p4", 400, 0, n1NodeName, setDSOwnerRef),
+						*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetDSOwnerRef),
+						*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetDSOwnerRef),
+						*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetDSOwnerRef),
+						*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
 						*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
 							// A pod with local storage.
-							setNormalOwnerRef(pod)
+							test.SetNormalOwnerRef(pod)
 							pod.Spec.Volumes = []v1.Volume{
 								{
 									Name: "sample",
@@ -135,7 +111,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n2NodeName: {
 				Items: []v1.Pod{
-					*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+					*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
 				},
 			},
 			n3NodeName: {},
@@ -155,21 +131,21 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes: map[string]*v1.Node{
 				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
 				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
 			pods: map[string]*v1.PodList{
 				n1NodeName: {
 					Items: []v1.Pod{
-						*test.BuildTestPod("p1", 400, 0, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p2", 400, 0, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p3", 400, 0, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p4", 400, 0, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p5", 400, 0, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
 						// These won't be evicted.
-						*test.BuildTestPod("p6", 400, 0, n1NodeName, setDSOwnerRef),
+						*test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
 						*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
 							// A pod with local storage.
-							setNormalOwnerRef(pod)
+							test.SetNormalOwnerRef(pod)
 							pod.Spec.Volumes = []v1.Volume{
 								{
 									Name: "sample",
@@ -193,7 +169,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n2NodeName: {
 				Items: []v1.Pod{
-					*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+					*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
 				},
 			},
 			n3NodeName: {},
@@ -213,21 +189,21 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes: map[string]*v1.Node{
 				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
 				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
 			pods: map[string]*v1.PodList{
 				n1NodeName: {
 					Items: []v1.Pod{
-						*test.BuildTestPod("p1", 400, 300, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p2", 400, 300, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p3", 400, 300, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p4", 400, 300, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p5", 400, 300, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p1", 400, 300, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p2", 400, 300, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p3", 400, 300, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p4", 400, 300, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p5", 400, 300, n1NodeName, test.SetRSOwnerRef),
 						// These won't be evicted.
-						*test.BuildTestPod("p6", 400, 300, n1NodeName, setDSOwnerRef),
+						*test.BuildTestPod("p6", 400, 300, n1NodeName, test.SetDSOwnerRef),
 						*test.BuildTestPod("p7", 400, 300, n1NodeName, func(pod *v1.Pod) {
 							// A pod with local storage.
-							setNormalOwnerRef(pod)
+							test.SetNormalOwnerRef(pod)
 							pod.Spec.Volumes = []v1.Volume{
 								{
 									Name: "sample",
@@ -251,7 +227,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n2NodeName: {
 				Items: []v1.Pod{
-					*test.BuildTestPod("p9", 400, 2100, n1NodeName, setRSOwnerRef),
+					*test.BuildTestPod("p9", 400, 2100, n1NodeName, test.SetRSOwnerRef),
 				},
 			},
 			n3NodeName: {},
@@ -272,40 +248,40 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes: map[string]*v1.Node{
 				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
 				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
 			pods: map[string]*v1.PodList{
 				n1NodeName: {
 					Items: []v1.Pod{
 						*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							setHighPriority(pod)
+							test.SetRSOwnerRef(pod)
+							test.SetPodPriority(pod, highPriority)
 						}),
 						*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							setHighPriority(pod)
+							test.SetRSOwnerRef(pod)
+							test.SetPodPriority(pod, highPriority)
 						}),
 						*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							setHighPriority(pod)
+							test.SetRSOwnerRef(pod)
+							test.SetPodPriority(pod, highPriority)
 						}),
 						*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							setHighPriority(pod)
+							test.SetRSOwnerRef(pod)
+							test.SetPodPriority(pod, highPriority)
 						}),
 						*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							setLowPriority(pod)
+							test.SetRSOwnerRef(pod)
+							test.SetPodPriority(pod, lowPriority)
 						}),
 						// These won't be evicted.
 						*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setDSOwnerRef(pod)
-							setHighPriority(pod)
+							test.SetDSOwnerRef(pod)
+							test.SetPodPriority(pod, highPriority)
 						}),
 						*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
 							// A pod with local storage.
-							setNormalOwnerRef(pod)
-							setLowPriority(pod)
+							test.SetNormalOwnerRef(pod)
+							test.SetPodPriority(pod, lowPriority)
 							pod.Spec.Volumes = []v1.Volume{
 								{
 									Name: "sample",
@@ -329,7 +305,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n2NodeName: {
 				Items: []v1.Pod{
-					*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+					*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
 				},
 			},
 			n3NodeName: {},
@@ -349,38 +325,38 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes: map[string]*v1.Node{
 				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
 				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
 			// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
 			pods: map[string]*v1.PodList{
 				n1NodeName: {
 					Items: []v1.Pod{
 						*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							makeBestEffortPod(pod)
+							test.SetRSOwnerRef(pod)
+							test.MakeBestEffortPod(pod)
 						}),
 						*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							makeBestEffortPod(pod)
+							test.SetRSOwnerRef(pod)
+							test.MakeBestEffortPod(pod)
 						}),
 						*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
+							test.SetRSOwnerRef(pod)
 						}),
 						*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							makeBestEffortPod(pod)
+							test.SetRSOwnerRef(pod)
+							test.MakeBestEffortPod(pod)
 						}),
 						*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							makeBestEffortPod(pod)
+							test.SetRSOwnerRef(pod)
+							test.MakeBestEffortPod(pod)
 						}),
 						// These won't be evicted.
 						*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setDSOwnerRef(pod)
+							test.SetDSOwnerRef(pod)
 						}),
 						*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
 							// A pod with local storage.
-							setNormalOwnerRef(pod)
+							test.SetNormalOwnerRef(pod)
 							pod.Spec.Volumes = []v1.Volume{
 								{
 									Name: "sample",
@@ -404,7 +380,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n2NodeName: {
 				Items: []v1.Pod{
-					*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+					*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
 				},
 			},
 			n3NodeName: {},
@@ -498,31 +474,33 @@ func TestLowNodeUtilization(t *testing.T) {
 func TestSortPodsByPriority(t *testing.T) {
 	n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
 
-	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, setLowPriority)
+	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
+		test.SetPodPriority(pod, lowPriority)
+	})
 
 	// BestEffort
 	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
-		setHighPriority(pod)
-		makeBestEffortPod(pod)
+		test.SetPodPriority(pod, highPriority)
+		test.MakeBestEffortPod(pod)
 	})
 
 	// Burstable
 	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
-		setHighPriority(pod)
-		makeBurstablePod(pod)
+		test.SetPodPriority(pod, highPriority)
+		test.MakeBurstablePod(pod)
 	})
 
 	// Guaranteed
 	p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
-		setHighPriority(pod)
-		makeGuaranteedPod(pod)
+		test.SetPodPriority(pod, highPriority)
+		test.MakeGuaranteedPod(pod)
 	})
 
 	// Best effort with nil priorities.
-	p5 := test.BuildTestPod("p5", 400, 100, n1.Name, makeBestEffortPod)
+	p5 := test.BuildTestPod("p5", 400, 100, n1.Name, test.MakeBestEffortPod)
 	p5.Spec.Priority = nil
 
-	p6 := test.BuildTestPod("p6", 400, 100, n1.Name, makeGuaranteedPod)
+	p6 := test.BuildTestPod("p6", 400, 100, n1.Name, test.MakeGuaranteedPod)
 	p6.Spec.Priority = nil
 
 	podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
@@ -783,7 +761,7 @@ func TestWithTaints(t *testing.T) {
 		},
 	}
 
-	podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, setRSOwnerRef)
+	podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, test.SetRSOwnerRef)
 	podThatToleratesTaint.Spec.Tolerations = []v1.Toleration{
 		{
 			Key:      "key",
@@ -802,16 +780,16 @@
 			nodes: []*v1.Node{n1, n2, n3},
 			pods: []*v1.Pod{
 				//Node 1 pods
-				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
 				// Node 2 pods
-				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, test.SetRSOwnerRef),
 			},
 			evictionsExpected: 1,
 		},
@@ -820,16 +798,16 @@
 			nodes: []*v1.Node{n1, n3withTaints},
 			pods: []*v1.Pod{
 				//Node 1 pods
-				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
 				// Node 3 pods
-				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
 			},
 			evictionsExpected: 0,
 		},
@@ -838,16 +816,16 @@
 			nodes: []*v1.Node{n1, n3withTaints},
 			pods: []*v1.Pod{
 				//Node 1 pods
-				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
 				podThatToleratesTaint,
 				// Node 3 pods
-				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
 			},
 			evictionsExpected: 1,
 		},
diff --git a/test/test_utils.go b/test/test_utils.go
index 95680cf11..206fa85c4 100644
--- a/test/test_utils.go
+++ b/test/test_utils.go
@@ -116,3 +116,45 @@ func BuildTestNode(name string, millicpu int64, mem int64, pods int64, apply func(*v1.Node)) *v1.Node {
 	}
 	return node
 }
+
+// MakeBestEffortPod makes the given pod a BestEffort pod
+func MakeBestEffortPod(pod *v1.Pod) {
+	pod.Spec.Containers[0].Resources.Requests = nil
+	pod.Spec.Containers[0].Resources.Limits = nil
+}
+
+// MakeBurstablePod makes the given pod a Burstable pod
+func MakeBurstablePod(pod *v1.Pod) {
+	pod.Spec.Containers[0].Resources.Limits = nil
+}
+
+// MakeGuaranteedPod makes the given pod a Guaranteed pod
+func MakeGuaranteedPod(pod *v1.Pod) {
+	pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
+	pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]
+}
+
+// SetRSOwnerRef sets the given pod's owner to ReplicaSet
+func SetRSOwnerRef(pod *v1.Pod) {
+	pod.ObjectMeta.OwnerReferences = GetReplicaSetOwnerRefList()
+}
+
+// SetDSOwnerRef sets the given pod's owner to DaemonSet
+func SetDSOwnerRef(pod *v1.Pod) {
+	pod.ObjectMeta.OwnerReferences = GetDaemonSetOwnerRefList()
+}
+
+// SetNormalOwnerRef sets the given pod's owner to Pod
+func SetNormalOwnerRef(pod *v1.Pod) {
+	pod.ObjectMeta.OwnerReferences = GetNormalPodOwnerRefList()
+}
+
+// SetPodPriority sets the given pod's priority
+func SetPodPriority(pod *v1.Pod, priority int32) {
+	pod.Spec.Priority = &priority
+}
+
+// SetNodeUnschedulable sets the given node unschedulable
+func SetNodeUnschedulable(node *v1.Node) {
+	node.Spec.Unschedulable = true
+}

From 43525f649348aa7f614aa9e357eb3e1233b9ab8e Mon Sep 17 00:00:00 2001
From: lixiang
Date: Tue, 16 Jun 2020 19:19:03 +0800
Subject: [PATCH 2/3] Move sortPodsBasedOnPriority to podutil

---
 README.md                                     |  3 +-
 pkg/descheduler/pod/pods.go                   | 25 ++++++++++
 pkg/descheduler/pod/pods_test.go              | 46 +++++++++++++++++++
 .../strategies/lownodeutilization.go          | 24 +---------
 .../strategies/lownodeutilization_test.go     | 42 -----------------
 5 files changed, 74 insertions(+), 66 deletions(-)

diff --git a/README.md b/README.md
index 68d695f32..32bced6a3 100644
--- a/README.md
+++ b/README.md
@@ -238,7 +238,8 @@ When the descheduler decides to evict pods from a node, it employs the following
 never evicted because these pods won't be recreated.
 * Pods associated with DaemonSets are never evicted.
 * Pods with local storage are never evicted.
-* Best efforts pods are evicted before burstable and guaranteed pods.
+* In `LowNodeUtilization`, pods are evicted by their priority from low to high, and if they have the same priority,
+best effort pods are evicted before burstable and guaranteed pods.
 * All types of pods with the annotation descheduler.alpha.kubernetes.io/evict are
 evicted. This annotation is used to override checks which prevent eviction and
 users can select which pod is evicted. Users should know how and if the pod will be recreated.
diff --git a/pkg/descheduler/pod/pods.go b/pkg/descheduler/pod/pods.go
index 91f0f0fed..8e7ad39dc 100644
--- a/pkg/descheduler/pod/pods.go
+++ b/pkg/descheduler/pod/pods.go
@@ -23,6 +23,7 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	clientset "k8s.io/client-go/kubernetes"
 	"sigs.k8s.io/descheduler/pkg/utils"
+	"sort"
 )
 
 // ListPodsOnANode lists all of the pods on a node
@@ -68,3 +69,27 @@
 func IsGuaranteedPod(pod *v1.Pod) bool {
 	return utils.GetPodQOS(pod) == v1.PodQOSGuaranteed
 }
+
+// SortPodsBasedOnPriorityLowToHigh sorts pods based on their priorities from low to high.
+// If pods have the same priority, they are sorted by QoS in the following order:
+// BestEffort, Burstable, Guaranteed
+func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
+	sort.Slice(pods, func(i, j int) bool {
+		if pods[i].Spec.Priority == nil && pods[j].Spec.Priority != nil {
+			return true
+		}
+		if pods[j].Spec.Priority == nil && pods[i].Spec.Priority != nil {
+			return false
+		}
+		if (pods[j].Spec.Priority == nil && pods[i].Spec.Priority == nil) || (*pods[i].Spec.Priority == *pods[j].Spec.Priority) {
+			if IsBestEffortPod(pods[i]) {
+				return true
+			}
+			if IsBurstablePod(pods[i]) && IsGuaranteedPod(pods[j]) {
+				return true
+			}
+			return false
+		}
+		return *pods[i].Spec.Priority < *pods[j].Spec.Priority
+	})
+}
diff --git a/pkg/descheduler/pod/pods_test.go b/pkg/descheduler/pod/pods_test.go
index aed6361b6..9bff1d2b4 100644
--- a/pkg/descheduler/pod/pods_test.go
+++ b/pkg/descheduler/pod/pods_test.go
@@ -19,6 +19,7 @@ package pod
 import (
 	"context"
 	"fmt"
+	"reflect"
 	"strings"
 	"testing"
 
@@ -29,6 +30,11 @@ import (
 	"sigs.k8s.io/descheduler/test"
 )
 
+var (
+	lowPriority  = int32(0)
+	highPriority = int32(10000)
+)
+
 func TestListPodsOnANode(t *testing.T) {
 	testCases := []struct {
 		name string
@@ -67,3 +73,43 @@ func TestListPodsOnANode(t *testing.T) {
 		}
 	}
 }
+
+func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
+	n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
+
+	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
+		test.SetPodPriority(pod, lowPriority)
+	})
+
+	// BestEffort
+	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
+		test.SetPodPriority(pod, highPriority)
+		test.MakeBestEffortPod(pod)
+	})
+
+	// Burstable
+	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
+		test.SetPodPriority(pod, highPriority)
+		test.MakeBurstablePod(pod)
+	})
+
+	// Guaranteed
+	p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
+		test.SetPodPriority(pod, highPriority)
+		test.MakeGuaranteedPod(pod)
+	})
+
+	// Best effort with nil priorities.
+	p5 := test.BuildTestPod("p5", 400, 100, n1.Name, test.MakeBestEffortPod)
+	p5.Spec.Priority = nil
+
+	p6 := test.BuildTestPod("p6", 400, 100, n1.Name, test.MakeGuaranteedPod)
+	p6.Spec.Priority = nil
+
+	podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
+
+	SortPodsBasedOnPriorityLowToHigh(podList)
+	if !reflect.DeepEqual(podList[len(podList)-1], p4) {
+		t.Errorf("Expected last pod in sorted list to be %v, which is of highest priority and guaranteed, but got %v", p4, podList[len(podList)-1])
+	}
+}
diff --git a/pkg/descheduler/strategies/lownodeutilization.go b/pkg/descheduler/strategies/lownodeutilization.go
index b16f84e61..9e674cbfe 100644
--- a/pkg/descheduler/strategies/lownodeutilization.go
+++ b/pkg/descheduler/strategies/lownodeutilization.go
@@ -247,7 +247,7 @@ func evictPodsFromTargetNodes(
 		evictablePods = append(append(burstablePods, bestEffortPods...), guaranteedPods...)
 
 		// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
-		sortPodsBasedOnPriority(evictablePods)
+		podutil.SortPodsBasedOnPriorityLowToHigh(evictablePods)
 		evictPods(ctx, evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
 	} else {
 		// TODO: Remove this when we support only priority.
@@ -338,28 +338,6 @@ func sortNodesByUsage(nodes []NodeUsageMap) {
 	})
 }
 
-// sortPodsBasedOnPriority sorts pods based on priority and if their priorities are equal, they are sorted based on QoS tiers.
-func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
-	sort.Slice(evictablePods, func(i, j int) bool {
-		if evictablePods[i].Spec.Priority == nil && evictablePods[j].Spec.Priority != nil {
-			return true
-		}
-		if evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority != nil {
-			return false
-		}
-		if (evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority == nil) || (*evictablePods[i].Spec.Priority == *evictablePods[j].Spec.Priority) {
-			if podutil.IsBestEffortPod(evictablePods[i]) {
-				return true
-			}
-			if podutil.IsBurstablePod(evictablePods[i]) && podutil.IsGuaranteedPod(evictablePods[j]) {
-				return true
-			}
-			return false
-		}
-		return *evictablePods[i].Spec.Priority < *evictablePods[j].Spec.Priority
-	})
-}
-
 // createNodePodsMap returns nodepodsmap with evictable pods on node.
 func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {
 	npm := NodePodsMap{}
diff --git a/pkg/descheduler/strategies/lownodeutilization_test.go b/pkg/descheduler/strategies/lownodeutilization_test.go
index 4aa6ece2f..5d87570ed 100644
--- a/pkg/descheduler/strategies/lownodeutilization_test.go
+++ b/pkg/descheduler/strategies/lownodeutilization_test.go
@@ -22,8 +22,6 @@ import (
 	"strings"
 	"testing"
 
-	"reflect"
-
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/api/policy/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -471,46 +469,6 @@ func TestLowNodeUtilization(t *testing.T) {
 	}
 }
 
-func TestSortPodsByPriority(t *testing.T) {
-	n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
-
-	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
-		test.SetPodPriority(pod, lowPriority)
-	})
-
-	// BestEffort
-	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
-		test.SetPodPriority(pod, highPriority)
-		test.MakeBestEffortPod(pod)
-	})
-
-	// Burstable
-	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
-		test.SetPodPriority(pod, highPriority)
-		test.MakeBurstablePod(pod)
-	})
-
-	// Guaranteed
-	p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
-		test.SetPodPriority(pod, highPriority)
-		test.MakeGuaranteedPod(pod)
-	})
-
-	// Best effort with nil priorities.
-	p5 := test.BuildTestPod("p5", 400, 100, n1.Name, test.MakeBestEffortPod)
-	p5.Spec.Priority = nil
-
-	p6 := test.BuildTestPod("p6", 400, 100, n1.Name, test.MakeGuaranteedPod)
-	p6.Spec.Priority = nil
-
-	podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
-
-	sortPodsBasedOnPriority(podList)
-	if !reflect.DeepEqual(podList[len(podList)-1], p4) {
-		t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
-	}
-}
-
 func TestValidateStrategyConfig(t *testing.T) {
 	tests := []struct {
 		name string

From 65a03e76bfda0e4379fd9971153ff52a443978e9 Mon Sep 17 00:00:00 2001
From: lixiang
Date: Fri, 19 Jun 2020 11:48:23 +0800
Subject: [PATCH 3/3] Add sorting to RemovePodsViolatingInterPodAntiAffinity

---
 README.md                                     |  2 +-
 .../strategies/pod_antiaffinity.go            |  2 +
 .../strategies/pod_antiaffinity_test.go       | 45 ++++++++++++++-----
 3 files changed, 37 insertions(+), 12 deletions(-)

diff --git a/README.md b/README.md
index 32bced6a3..e7d26ee92 100644
--- a/README.md
+++ b/README.md
@@ -238,7 +238,7 @@ When the descheduler decides to evict pods from a node, it employs the following
 never evicted because these pods won't be recreated.
 * Pods associated with DaemonSets are never evicted.
 * Pods with local storage are never evicted.
-* In `LowNodeUtilization`, pods are evicted by their priority from low to high, and if they have the same priority,
+* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have the same priority,
 best effort pods are evicted before burstable and guaranteed pods.
 * All types of pods with the annotation descheduler.alpha.kubernetes.io/evict are
 evicted. This annotation is used to override checks which prevent eviction and users can select which pod is evicted. Users should know how and if the pod will be recreated.
diff --git a/pkg/descheduler/strategies/pod_antiaffinity.go b/pkg/descheduler/strategies/pod_antiaffinity.go
index 15c96b36a..30990929e 100644
--- a/pkg/descheduler/strategies/pod_antiaffinity.go
+++ b/pkg/descheduler/strategies/pod_antiaffinity.go
@@ -37,6 +37,8 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 		if err != nil {
 			return
 		}
+		// sort the evictable Pods based on priority; if there are multiple pods with the same priority, they are sorted based on QoS tiers.
+		podutil.SortPodsBasedOnPriorityLowToHigh(pods)
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
 			if checkPodsWithAntiAffinityExist(pods[i], pods) {
diff --git a/pkg/descheduler/strategies/pod_antiaffinity_test.go b/pkg/descheduler/strategies/pod_antiaffinity_test.go
index 37fdf4617..d0d0a4990 100644
--- a/pkg/descheduler/strategies/pod_antiaffinity_test.go
+++ b/pkg/descheduler/strategies/pod_antiaffinity_test.go
@@ -37,16 +37,33 @@ func TestPodAntiAffinity(t *testing.T) {
 	p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
 	p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
 	p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
+	p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
+	p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
+	p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
 	p2.Labels = map[string]string{"foo": "bar"}
-	p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+	p5.Labels = map[string]string{"foo": "bar"}
+	p6.Labels = map[string]string{"foo": "bar"}
+	p7.Labels = map[string]string{"foo1": "bar1"}
+	test.SetNormalOwnerRef(p1)
+	test.SetNormalOwnerRef(p2)
+	test.SetNormalOwnerRef(p3)
+	test.SetNormalOwnerRef(p4)
+	test.SetNormalOwnerRef(p5)
+	test.SetNormalOwnerRef(p6)
+	test.SetNormalOwnerRef(p7)
 
 	// set pod anti affinity
-	setPodAntiAffinity(p1)
-	setPodAntiAffinity(p3)
-	setPodAntiAffinity(p4)
+	setPodAntiAffinity(p1, "foo", "bar")
+	setPodAntiAffinity(p3, "foo", "bar")
+	setPodAntiAffinity(p4, "foo", "bar")
+	setPodAntiAffinity(p5, "foo1", "bar1")
+	setPodAntiAffinity(p6, "foo1", "bar1")
+	setPodAntiAffinity(p7, "foo", "bar")
+
+	// set pod priority
+	test.SetPodPriority(p5, 100)
+	test.SetPodPriority(p6, 50)
+	test.SetPodPriority(p7, 0)
 
 	tests := []struct {
 		description             string
 		maxPodsToEvict          int
 		pods                    []v1.Pod
 		expectedEvictedPodCount int
 	}{
 		{
 			description:             "Maximum pods to evict - 0",
 			maxPodsToEvict:          0,
 			pods:                    []v1.Pod{*p1, *p2, *p3, *p4},
 			expectedEvictedPodCount: 3,
 		},
+		{
+			description:             "Evict only 1 pod after sorting",
+			maxPodsToEvict:          0,
+			pods:                    []v1.Pod{*p5, *p6, *p7},
+			expectedEvictedPodCount: 1,
+		},
 	}
 
 	for _, test := range tests {
 		// create fake client
 		fakeClient := &fake.Clientset{}
 		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-			return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil
+			return true, &v1.PodList{Items: test.pods}, nil
 		})
 		fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
 			return true, node, nil
 		})
 	}
 }
 
-func setPodAntiAffinity(inputPod *v1.Pod) {
+func setPodAntiAffinity(inputPod *v1.Pod, labelKey, labelValue string) {
 	inputPod.Spec.Affinity = &v1.Affinity{
 		PodAntiAffinity: &v1.PodAntiAffinity{
 			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
 				{
 					LabelSelector: &metav1.LabelSelector{
 						MatchExpressions: []metav1.LabelSelectorRequirement{
 							{
-								Key:      "foo",
+								Key:      labelKey,
 								Operator: metav1.LabelSelectorOpIn,
-								Values:   []string{"bar"},
+								Values:   []string{labelValue},
 							},
 						},
 					},
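The series only exercises the new helper through unit tests, so here is a minimal, self-contained sketch of the ordering contract it provides. This is not part of the patches: it assumes the `sigs.k8s.io/descheduler` module layout implied by the diffs above (the `test` helpers exported in PATCH 1/3 and the sort exported in PATCH 2/3), and the `podutil` import alias mirrors the one used in lownodeutilization.go. The pod names here are illustrative only.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/test"
)

func main() {
	n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
	high := int32(10000)

	// Guaranteed QoS at high priority: expected to sort last.
	guaranteed := test.BuildTestPod("guaranteed", 400, 100, n1.Name, func(pod *v1.Pod) {
		test.SetPodPriority(pod, high)
		test.MakeGuaranteedPod(pod)
	})
	// Burstable QoS at the same priority: sorts just before the guaranteed pod.
	burstable := test.BuildTestPod("burstable", 400, 0, n1.Name, func(pod *v1.Pod) {
		test.SetPodPriority(pod, high)
		test.MakeBurstablePod(pod)
	})
	// BestEffort QoS at the same priority: sorts before both of the above.
	bestEffort := test.BuildTestPod("besteffort", 400, 0, n1.Name, func(pod *v1.Pod) {
		test.SetPodPriority(pod, high)
		test.MakeBestEffortPod(pod)
	})
	// A nil priority sorts before any set priority, regardless of QoS class.
	nilPriority := test.BuildTestPod("nil-priority", 400, 100, n1.Name, test.MakeGuaranteedPod)
	nilPriority.Spec.Priority = nil

	pods := []*v1.Pod{guaranteed, burstable, bestEffort, nilPriority}
	podutil.SortPodsBasedOnPriorityLowToHigh(pods)

	// Expected order: nil-priority, besteffort, burstable, guaranteed.
	for _, p := range pods {
		fmt.Println(p.Name)
	}
}

The low-to-high order matters because both strategies walk the sorted slice from the front and stop evicting once their limits are reached (the target thresholds in `LowNodeUtilization`, or `maxPodsToEvict`), so the intent is that the cheapest evictions — low priority first, then BestEffort within a priority — are attempted before anything else.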