mirror of https://github.com/kubernetes-sigs/descheduler.git

Merge pull request #343 from ingvagabund/test-cleanup

Clean e2e test so it's easier to extend it
Authored by Kubernetes Prow Robot on 2020-07-21 05:21:13 -07:00, committed by GitHub.


@@ -20,12 +20,14 @@ import (
"context"
"math"
"os"
"strings"
"testing"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
@@ -65,8 +67,7 @@ func MakePodSpec() v1.PodSpec {
}
// RcByNameContainer returns a ReplicationController with the specified name and container
func RcByNameContainer(name string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
zeroGracePeriod := int64(0)
// Add "name": name to the labels, overwriting if it exists.
@@ -81,6 +82,7 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
@@ -98,86 +100,119 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr
}
// startEndToEndForLowNodeUtilization tests the low node utilization strategy.
func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer) {
var thresholds = make(deschedulerapi.ResourceThresholds)
var targetThresholds = make(deschedulerapi.ResourceThresholds)
thresholds[v1.ResourceMemory] = 20
thresholds[v1.ResourcePods] = 20
thresholds[v1.ResourceCPU] = 85
targetThresholds[v1.ResourceMemory] = 20
targetThresholds[v1.ResourcePods] = 20
targetThresholds[v1.ResourceCPU] = 90
func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer, podEvictor *evictions.PodEvictor) {
// Run descheduler.
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
klog.Fatalf("%v", err)
}
stopChannel := make(chan struct{})
nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", stopChannel)
nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", nil)
if err != nil {
klog.Fatalf("%v", err)
}
lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{
strategies.LowNodeUtilization(
ctx,
clientset,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
NodeResourceUtilizationThresholds: &deschedulerapi.NodeResourceUtilizationThresholds{
Thresholds: thresholds,
TargetThresholds: targetThresholds,
Thresholds: deschedulerapi.ResourceThresholds{
v1.ResourceMemory: 20,
v1.ResourcePods: 20,
v1.ResourceCPU: 85,
},
TargetThresholds: deschedulerapi.ResourceThresholds{
v1.ResourceMemory: 20,
v1.ResourcePods: 20,
v1.ResourceCPU: 90,
},
},
},
},
}
podEvictor := evictions.NewPodEvictor(
clientset,
evictionPolicyGroupVersion,
false,
0,
nodes,
false,
podEvictor,
)
strategies.LowNodeUtilization(ctx, clientset, lowNodeUtilizationStrategy, nodes, podEvictor)
time.Sleep(10 * time.Second)
}
func TestE2E(t *testing.T) {
// If we have reached here, the cluster has already been set up and the kubeconfig file should
// be in the /tmp directory as admin.conf.
func TestLowNodeUtilization(t *testing.T) {
ctx := context.Background()
clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
if err != nil {
t.Errorf("Error during client creation with %v", err)
}
stopChannel := make(chan struct{}, 0)
sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)
defer close(stopChannel)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
var nodes []*v1.Node
for i := range nodeList.Items {
node := nodeList.Items[i]
nodes = append(nodes, &node)
}
sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
rc := RcByNameContainer("test-rc-node-utilization", testNamespace.Name, int32(15), map[string]string{"test": "node-utilization"}, nil)
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating rc %v", err)
}
evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
deleteRC(ctx, t, clientSet, rc)
}
func TestEvictAnnotation(t *testing.T) {
ctx := context.Background()
clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
if err != nil {
t.Errorf("Error during client creation with %v", err)
}
stopChannel := make(chan struct{}, 0)
sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)
defer close(stopChannel)
// Assumption: we have a 3-node cluster by now. Kubeadm brings all the master components onto the master node,
// so the last node will have the least utilization.
rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
_, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{})
if err != nil {
t.Errorf("Error creating deployment %v", err)
}
evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
var nodes []*v1.Node
for i := range nodeList.Items {
node := nodeList.Items[i]
nodes = append(nodes, &node)
}
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
rc := RcByNameContainer("test-rc-evict-annotation", testNamespace.Name, int32(15), map[string]string{"test": "annotation"}, nil)
rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
rc.Spec.Replicas = func(i int32) *int32 { return &i }(15)
rc.Spec.Template.Spec.Volumes = []v1.Volume{
{
Name: "sample",
@@ -187,11 +222,13 @@ func TestE2E(t *testing.T) {
},
},
}
_, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{})
if err != nil {
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating rc %v", err)
}
evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
deleteRC(ctx, t, clientSet, rc)
}
func TestDeschedulingInterval(t *testing.T) {
@@ -229,6 +266,43 @@ func TestDeschedulingInterval(t *testing.T) {
}
}
func deleteRC(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
// Set the number of replicas to 0
rcdeepcopy := rc.DeepCopy()
rcdeepcopy.Spec.Replicas = func(i int32) *int32 { return &i }(0)
if _, err := clientSet.CoreV1().ReplicationControllers(rcdeepcopy.Namespace).Update(ctx, rcdeepcopy, metav1.UpdateOptions{}); err != nil {
t.Fatalf("Error updating replica controller %v", err)
}
allPodsDeleted := false
// Wait up to 30 seconds until all pods are deleted
for i := 0; i < 6; i++ {
scale, _ := clientSet.CoreV1().ReplicationControllers(rc.Namespace).GetScale(ctx, rc.Name, metav1.GetOptions{})
if scale.Spec.Replicas == 0 {
allPodsDeleted = true
break
}
time.Sleep(5 * time.Second)
}
if !allPodsDeleted {
t.Errorf("Deleting of rc pods took too long")
}
if err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Delete(ctx, rc.Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("Error deleting rc %v", err)
}
if err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
_, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil && strings.Contains(err.Error(), "not found") {
return true, nil
}
return false, nil
}); err != nil {
t.Fatalf("Error deleting rc %v", err)
}
}
func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList []*v1.Node, rc *v1.ReplicationController) {
var leastLoadedNode *v1.Node
podsBefore := math.MaxInt16
@@ -261,7 +335,7 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
}
}
t.Log("Eviction of pods starting")
startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer)
startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer, podEvictor)
podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, leastLoadedNode, podEvictor.IsEvictable)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
@@ -270,33 +344,4 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
if podsBefore > podsAfter {
t.Fatalf("We should have seen more pods on this node, as per kubeadm's way of installing: %v, %v", podsBefore, podsAfter)
}
//set number of replicas to 0
rc.Spec.Replicas = func(i int32) *int32 { return &i }(0)
_, err = clientSet.CoreV1().ReplicationControllers("default").Update(ctx, rc, metav1.UpdateOptions{})
if err != nil {
t.Errorf("Error updating replica controller %v", err)
}
allPodsDeleted := false
//wait 30 seconds until all pods are deleted
for i := 0; i < 6; i++ {
scale, _ := clientSet.CoreV1().ReplicationControllers("default").GetScale(ctx, rc.Name, metav1.GetOptions{})
if scale.Spec.Replicas == 0 {
allPodsDeleted = true
break
}
time.Sleep(5 * time.Second)
}
if !allPodsDeleted {
t.Errorf("Deleting of rc pods took too long")
}
err = clientSet.CoreV1().ReplicationControllers("default").Delete(ctx, rc.Name, metav1.DeleteOptions{})
if err != nil {
t.Errorf("Error deleting rc %v", err)
}
//wait until rc is deleted
time.Sleep(5 * time.Second)
}
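
As a rough illustration of the "easier to extend" goal, here is a minimal sketch of how a further e2e case could reuse the helpers this commit refactors: RcByNameContainer with an explicit namespace, the per-test namespace pattern, evictPods, and the shared deleteRC teardown. This sketch is not part of the commit; the test name, label value, and replica count are made up, and it assumes the function lives in the same test/e2e package so the package-level helpers and existing imports are in scope.

func TestLowNodeUtilizationExample(t *testing.T) {
	ctx := context.Background()

	// Assumed to run like the other e2e tests: against a cluster reachable via $KUBECONFIG.
	clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
	if err != nil {
		t.Fatalf("Error during client creation with %v", err)
	}

	stopChannel := make(chan struct{})
	defer close(stopChannel)
	sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
	// Request the node informer before Start so the factory actually runs it.
	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
	sharedInformerFactory.Start(stopChannel)
	sharedInformerFactory.WaitForCacheSync(stopChannel)

	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		t.Fatalf("Error listing nodes with %v", err)
	}
	var nodes []*v1.Node
	for i := range nodeList.Items {
		nodes = append(nodes, &nodeList.Items[i])
	}

	// Each test gets its own namespace derived from the test name and cleans it up on exit.
	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Unable to create ns %v", testNamespace.Name)
	}
	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})

	// RcByNameContainer now takes the namespace explicitly, so the RC lands in the
	// per-test namespace instead of "default". Name, labels, and replica count are illustrative.
	rc := RcByNameContainer("test-rc-example", testNamespace.Name, int32(10), map[string]string{"test": "example"}, nil)
	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Error creating rc %v", err)
	}

	// Exercise the strategy, then scale down and remove the RC through the shared
	// deleteRC helper instead of repeating the teardown inline.
	evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
	deleteRC(ctx, t, clientSet, rc)
}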