From 7bb8b4feda30db7dd37a356eb945462b9ca7c732 Mon Sep 17 00:00:00 2001
From: Jan Chaloupka
Date: Mon, 13 Jul 2020 14:23:38 +0200
Subject: [PATCH 1/4] e2e: create and delete RC in the same function body

So create/delete are coupled and the caller of create is responsible for
calling delete as well.
---
 test/e2e/e2e_test.go | 70 ++++++++++++++++++++++++++------------------
 1 file changed, 41 insertions(+), 29 deletions(-)

diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 0f2566a8d..f42e19d62 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -20,12 +20,14 @@ import (
 	"context"
 	"math"
 	"os"
+	"strings"
 	"testing"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
@@ -175,6 +177,7 @@ func TestE2E(t *testing.T) {
 		t.Errorf("Error creating deployment %v", err)
 	}
 	evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
+	deleteRC(ctx, t, clientSet, rc)
 
 	rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
 	rc.Spec.Replicas = func(i int32) *int32 { return &i }(15)
@@ -192,6 +195,7 @@ func TestE2E(t *testing.T) {
 		t.Errorf("Error creating deployment %v", err)
 	}
 	evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
+	deleteRC(ctx, t, clientSet, rc)
 }
 
 func TestDeschedulingInterval(t *testing.T) {
@@ -229,6 +233,43 @@ func TestDeschedulingInterval(t *testing.T) {
 	}
 }
 
+func deleteRC(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
+	// set the number of replicas to 0
+	rcdeepcopy := rc.DeepCopy()
+	rcdeepcopy.Spec.Replicas = func(i int32) *int32 { return &i }(0)
+	if _, err := clientSet.CoreV1().ReplicationControllers(rcdeepcopy.Namespace).Update(ctx, rcdeepcopy, metav1.UpdateOptions{}); err != nil {
+		t.Fatalf("Error updating replica controller %v", err)
+	}
+	allPodsDeleted := false
+	// wait up to 30 seconds until all pods are deleted
+	for i := 0; i < 6; i++ {
+		scale, _ := clientSet.CoreV1().ReplicationControllers(rc.Namespace).GetScale(ctx, rc.Name, metav1.GetOptions{})
+		if scale.Spec.Replicas == 0 {
+			allPodsDeleted = true
+			break
+		}
+		time.Sleep(5 * time.Second)
+	}
+
+	if !allPodsDeleted {
+		t.Errorf("Deleting of rc pods took too long")
+	}
+
+	if err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Delete(ctx, rc.Name, metav1.DeleteOptions{}); err != nil {
+		t.Fatalf("Error deleting rc %v", err)
+	}
+
+	if err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
+		_, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Get(ctx, rc.Name, metav1.GetOptions{})
+		if err != nil && strings.Contains(err.Error(), "not found") {
+			return true, nil
+		}
+		return false, nil
+	}); err != nil {
+		t.Fatalf("Error deleting rc %v", err)
+	}
+}
+
 func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList []*v1.Node, rc *v1.ReplicationController) {
 	var leastLoadedNode *v1.Node
 	podsBefore := math.MaxInt16
@@ -270,33 +311,4 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
 	if podsBefore > podsAfter {
 		t.Fatalf("We should have seen more pods on this node as per kubeadm's way of installing %v, %v", podsBefore, podsAfter)
 	}
-
-	//set number of replicas to 0
-	rc.Spec.Replicas = func(i int32) *int32 { return &i }(0)
-	_, err = clientSet.CoreV1().ReplicationControllers("default").Update(ctx, rc, metav1.UpdateOptions{})
-	if err != nil {
-		t.Errorf("Error updating replica controller %v", err)
-	}
-	allPodsDeleted := false
-	//wait 30 seconds until all pods are deleted
-	for i := 0; i < 6; i++ {
-		scale, _ := clientSet.CoreV1().ReplicationControllers("default").GetScale(ctx, rc.Name, metav1.GetOptions{})
-		if scale.Spec.Replicas == 0 {
-			allPodsDeleted = true
-			break
-		}
-		time.Sleep(5 * time.Second)
-	}
-
-	if !allPodsDeleted {
-		t.Errorf("Deleting of rc pods took too long")
-	}
-
-	err = clientSet.CoreV1().ReplicationControllers("default").Delete(ctx, rc.Name, metav1.DeleteOptions{})
-	if err != nil {
-		t.Errorf("Error deleting rc %v", err)
-	}
-
-	//wait until rc is deleted
-	time.Sleep(5 * time.Second)
 }

From 327880ba513779dc1e1dec2eacb16db346960f00 Mon Sep 17 00:00:00 2001
From: Jan Chaloupka
Date: Mon, 13 Jul 2020 14:50:58 +0200
Subject: [PATCH 2/4] e2e: separate tests for LowNodeUtilization strategy and
 evict annotation
---
 test/e2e/e2e_test.go | 64 ++++++++++++++++++++++++++++++++------------
 1 file changed, 47 insertions(+), 17 deletions(-)

diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index f42e19d62..07613b3e8 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -143,44 +143,73 @@ func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset
 	time.Sleep(10 * time.Second)
 }
 
-func TestE2E(t *testing.T) {
-	// If we have reached here, it means cluster would have been already setup and the kubeconfig file should
-	// be in /tmp directory as admin.conf.
+func TestLowNodeUtilization(t *testing.T) {
 	ctx := context.Background()
+
 	clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
 	if err != nil {
 		t.Errorf("Error during client creation with %v", err)
 	}
+
+	stopChannel := make(chan struct{}, 0)
+
+	sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
+	sharedInformerFactory.Start(stopChannel)
+	sharedInformerFactory.WaitForCacheSync(stopChannel)
+	defer close(stopChannel)
+
+	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
+
 	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		t.Errorf("Error listing node with %v", err)
 	}
+
 	var nodes []*v1.Node
 	for i := range nodeList.Items {
 		node := nodeList.Items[i]
 		nodes = append(nodes, &node)
 	}
-	sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
-	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
+
+	rc := RcByNameContainer("test-rc-node-utilization", int32(15), map[string]string{"test": "node-utilization"}, nil)
+	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
+		t.Errorf("Error creating deployment %v", err)
+	}
+
+	evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
+	deleteRC(ctx, t, clientSet, rc)
+}
+
+func TestEvictAnnotation(t *testing.T) {
+	ctx := context.Background()
+
+	clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
+	if err != nil {
+		t.Errorf("Error during client creation with %v", err)
+	}
 
 	stopChannel := make(chan struct{}, 0)
+
+	sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
 	sharedInformerFactory.Start(stopChannel)
 	sharedInformerFactory.WaitForCacheSync(stopChannel)
 	defer close(stopChannel)
 
-	// Assumption: We would have 3 node cluster by now. Kubeadm brings all the master components onto
-	// master node. So, the last node would have least utilization.
-	rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
-	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{})
-	if err != nil {
-		t.Errorf("Error creating deployment %v", err)
-	}
-	evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
-	deleteRC(ctx, t, clientSet, rc)
+	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
 
+	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+	if err != nil {
+		t.Errorf("Error listing node with %v", err)
+	}
+
+	var nodes []*v1.Node
+	for i := range nodeList.Items {
+		node := nodeList.Items[i]
+		nodes = append(nodes, &node)
+	}
+
+	rc := RcByNameContainer("test-rc-evict-annotation", int32(15), map[string]string{"test": "annotation"}, nil)
 	rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
-	rc.Spec.Replicas = func(i int32) *int32 { return &i }(15)
 	rc.Spec.Template.Spec.Volumes = []v1.Volume{
 		{
 			Name: "sample",
@@ -190,10 +219,11 @@ func TestE2E(t *testing.T) {
 			},
 		},
 	}
-	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{})
-	if err != nil {
+
+	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
 		t.Errorf("Error creating deployment %v", err)
 	}
+
 	evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
 	deleteRC(ctx, t, clientSet, rc)
 }

From f801c5f72fee4e8d8cf81c49f5a90569003a8b33 Mon Sep 17 00:00:00 2001
From: Jan Chaloupka
Date: Mon, 13 Jul 2020 16:17:20 +0200
Subject: [PATCH 3/4] e2e: code cleanup
---
 test/e2e/e2e_test.go | 54 ++++++++++++++++++--------------------------
 1 file changed, 22 insertions(+), 32 deletions(-)

diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 07613b3e8..da52a0b8d 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -68,7 +68,6 @@ func MakePodSpec() v1.PodSpec {
 
 // RcByNameContainer returns a ReplicationController with specified name and container
 func RcByNameContainer(name string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
-
 	zeroGracePeriod := int64(0)
 
 	// Add "name": name to the labels, overwriting if it exists.
@@ -100,46 +99,37 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr
 }
 
 // startEndToEndForLowNodeUtilization tests the low node utilization strategy.
-func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer) {
-	var thresholds = make(deschedulerapi.ResourceThresholds)
-	var targetThresholds = make(deschedulerapi.ResourceThresholds)
-	thresholds[v1.ResourceMemory] = 20
-	thresholds[v1.ResourcePods] = 20
-	thresholds[v1.ResourceCPU] = 85
-	targetThresholds[v1.ResourceMemory] = 20
-	targetThresholds[v1.ResourcePods] = 20
-	targetThresholds[v1.ResourceCPU] = 90
+func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer, podEvictor *evictions.PodEvictor) {
 	// Run descheduler.
-	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
-	if err != nil || len(evictionPolicyGroupVersion) == 0 {
-		klog.Fatalf("%v", err)
-	}
-	stopChannel := make(chan struct{})
-	nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", stopChannel)
+	nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", nil)
 	if err != nil {
 		klog.Fatalf("%v", err)
 	}
-	lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{
-		Enabled: true,
-		Params: &deschedulerapi.StrategyParameters{
-			NodeResourceUtilizationThresholds: &deschedulerapi.NodeResourceUtilizationThresholds{
-				Thresholds:       thresholds,
-				TargetThresholds: targetThresholds,
+	strategies.LowNodeUtilization(
+		ctx,
+		clientset,
+		deschedulerapi.DeschedulerStrategy{
+			Enabled: true,
+			Params: &deschedulerapi.StrategyParameters{
+				NodeResourceUtilizationThresholds: &deschedulerapi.NodeResourceUtilizationThresholds{
+					Thresholds: deschedulerapi.ResourceThresholds{
+						v1.ResourceMemory: 20,
+						v1.ResourcePods:   20,
+						v1.ResourceCPU:    85,
+					},
+					TargetThresholds: deschedulerapi.ResourceThresholds{
+						v1.ResourceMemory: 20,
+						v1.ResourcePods:   20,
+						v1.ResourceCPU:    90,
+					},
+				},
 			},
 		},
-	}
-
-	podEvictor := evictions.NewPodEvictor(
-		clientset,
-		evictionPolicyGroupVersion,
-		false,
-		0,
 		nodes,
-		false,
+		podEvictor,
 	)
-	strategies.LowNodeUtilization(ctx, clientset, lowNodeUtilizationStrategy, nodes, podEvictor)
 
 	time.Sleep(10 * time.Second)
 }
@@ -332,7 +322,7 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
 		}
 	}
 	t.Log("Eviction of pods starting")
-	startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer)
+	startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer, podEvictor)
 	podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, leastLoadedNode, podEvictor.IsEvictable)
 	if err != nil {
 		t.Errorf("Error listing pods on a node %v", err)

From 53badf7b61421ff41ca3e4dbebdeb6cc3626a998 Mon Sep 17 00:00:00 2001
From: Jan Chaloupka
Date: Tue, 14 Jul 2020 14:29:19 +0200
Subject: [PATCH 4/4] e2e: create dedicated ns for each e2e test
---
 test/e2e/e2e_test.go | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index da52a0b8d..848096f20 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -67,7 +67,7 @@ func MakePodSpec() v1.PodSpec {
 }
 
 // RcByNameContainer returns a ReplicationController with specified name and container
-func RcByNameContainer(name string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
+func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
 	zeroGracePeriod := int64(0)
 
 	// Add "name": name to the labels, overwriting if it exists.
@@ -81,7 +81,8 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr
 			APIVersion: "v1",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
+			Name:      name,
+			Namespace: namespace,
 		},
 		Spec: v1.ReplicationControllerSpec{
 			Replicas: func(i int32) *int32 { return &i }(replicas),
@@ -161,7 +162,13 @@ func TestLowNodeUtilization(t *testing.T) {
 		nodes = append(nodes, &node)
 	}
 
-	rc := RcByNameContainer("test-rc-node-utilization", int32(15), map[string]string{"test": "node-utilization"}, nil)
+	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
+	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Unable to create ns %v", testNamespace.Name)
+	}
+	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
+
+	rc := RcByNameContainer("test-rc-node-utilization", testNamespace.Name, int32(15), map[string]string{"test": "node-utilization"}, nil)
 	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
 		t.Errorf("Error creating deployment %v", err)
 	}
@@ -198,7 +205,13 @@ func TestEvictAnnotation(t *testing.T) {
 		nodes = append(nodes, &node)
 	}
 
-	rc := RcByNameContainer("test-rc-evict-annotation", int32(15), map[string]string{"test": "annotation"}, nil)
+	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
+	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Unable to create ns %v", testNamespace.Name)
+	}
+	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
+
+	rc := RcByNameContainer("test-rc-evict-annotation", testNamespace.Name, int32(15), map[string]string{"test": "annotation"}, nil)
 	rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
 	rc.Spec.Template.Spec.Volumes = []v1.Volume{
 		{
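
Taken together, the series ends with each e2e test owning its full object lifecycle. For orientation, here is a condensed sketch (not part of the patches) of the resulting test skeleton: RcByNameContainer and deleteRC are the helpers from the patches above, while the test name, the labels, and the import path for the CreateClient helper are illustrative assumptions.

package e2e

import (
	"context"
	"os"
	"strings"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// assumed import path for the CreateClient helper used by the patches
	"sigs.k8s.io/descheduler/pkg/descheduler/client"
)

func TestExampleStrategy(t *testing.T) {
	ctx := context.Background()

	clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
	if err != nil {
		t.Fatalf("Error during client creation with %v", err)
	}

	// Patch 4: a dedicated namespace per test, derived from the test name,
	// so repeated or parallel runs no longer collide in "default".
	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Unable to create ns %v", testNamespace.Name)
	}
	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})

	// Patch 4: the RC is created inside the test namespace.
	rc := RcByNameContainer("test-rc-example", testNamespace.Name, int32(15), map[string]string{"test": "example"}, nil)
	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
		t.Errorf("Error creating rc %v", err)
	}

	// ... exercise a descheduler strategy here (e.g. via evictPods) ...

	// Patch 1: the function that created the RC also deletes it; deleteRC
	// scales to zero and polls until the object is gone before returning.
	deleteRC(ctx, t, clientSet, rc)
}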