mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 13:29:11 +01:00

reform all test files

Signed-off-by: Garrybest <garrybest@foxmail.com>
Author: Garrybest
Date: 2021-12-11 19:27:05 +08:00
Parent: 0ff8ecb41e
Commit: cac3b9185b
17 changed files with 1387 additions and 1331 deletions
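The same change runs through every file below: each e2e test now builds a cached pod-to-node lookup (a podutil.GetPodsAssignedToNodeFunc) from the shared informer factory and passes it to the descheduler strategies as a new trailing argument. A minimal sketch of that pattern, assembled from the hunks below; the helper name buildPodIndex, the package clause, and the import paths are illustrative assumptions rather than code from this commit:

package e2e

import (
	"testing"

	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// buildPodIndex mirrors what the updated initializeClient does: register a pod
// informer, build a lookup over it keyed by node name, then start and sync the
// factory before handing the lookup to strategies.
func buildPodIndex(t *testing.T, clientSet clientset.Interface, stopCh chan struct{}) podutil.GetPodsAssignedToNodeFunc {
	factory := informers.NewSharedInformerFactory(clientSet, 0)
	podInformer := factory.Core().V1().Pods()

	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
	if err != nil {
		t.Fatalf("build get pods assigned to node function error: %v", err)
	}

	// The informer cache must be started and synced before the lookup is queried.
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	return getPodsAssignedToNode
}

Each strategy call in the diffs below then takes getPodsAssignedToNode as its final parameter.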


@@ -39,7 +39,7 @@ import (
func TestRemoveDuplicates(t *testing.T) {
ctx := context.Background()
-clientSet, _, stopCh := initializeClient(t)
+clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -164,6 +164,7 @@ func TestRemoveDuplicates(t *testing.T) {
},
workerNodes,
podEvictor,
+getPodsAssignedToNode,
)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)


@@ -21,7 +21,7 @@ var oneHourPodLifetimeSeconds uint = 3600
func TestFailedPods(t *testing.T) {
ctx := context.Background()
-clientSet, _, stopCh := initializeClient(t)
+clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
@@ -95,6 +95,7 @@ func TestFailedPods(t *testing.T) {
},
nodes,
podEvictor,
+getPodsAssignedToNode,
)
t.Logf("Finished RemoveFailedPods strategy for %s", name)


@@ -108,7 +108,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
}
}
-func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, chan struct{}) {
+func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, podutil.GetPodsAssignedToNodeFunc, chan struct{}) {
clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
if err != nil {
t.Errorf("Error during client creation with %v", err)
@@ -117,12 +117,18 @@ func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInfo
stopChannel := make(chan struct{})
sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
+nodeInformer := sharedInformerFactory.Core().V1().Nodes()
+podInformer := sharedInformerFactory.Core().V1().Pods()
+getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+if err != nil {
+t.Errorf("build get pods assigned to node function error: %v", err)
+}
sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)
-nodeInformer := sharedInformerFactory.Core().V1().Nodes()
-return clientSet, nodeInformer, stopChannel
+return clientSet, nodeInformer, getPodsAssignedToNode, stopChannel
}
func runPodLifetimeStrategy(
@@ -135,6 +141,7 @@ func runPodLifetimeStrategy(
priority *int32,
evictCritical bool,
labelSelector *metav1.LabelSelector,
+getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
// Run descheduler.
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
@@ -173,6 +180,7 @@ func runPodLifetimeStrategy(
evictCritical,
false,
),
+getPodsAssignedToNode,
)
}
@@ -202,7 +210,7 @@ func intersectStrings(lista, listb []string) []string {
func TestLowNodeUtilization(t *testing.T) {
ctx := context.Background()
-clientSet, _, stopCh := initializeClient(t)
+clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -293,7 +301,12 @@ func TestLowNodeUtilization(t *testing.T) {
// Run LowNodeUtilization strategy
podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
-podsOnMosttUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, workerNodes[0], podutil.WithFilter(podEvictor.Evictable().IsEvictable))
+podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
+if err != nil {
+t.Errorf("Error initializing pod filter function, %v", err)
+}
+podsOnMosttUtilizedNode, err := podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
@@ -318,11 +331,17 @@ func TestLowNodeUtilization(t *testing.T) {
},
workerNodes,
podEvictor,
+getPodsAssignedToNode,
)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
-podsOnMosttUtilizedNode, err = podutil.ListPodsOnANode(ctx, clientSet, workerNodes[0], podutil.WithFilter(podEvictor.Evictable().IsEvictable))
+podFilter, err = podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
+if err != nil {
+t.Errorf("Error initializing pod filter function, %v", err)
+}
+podsOnMosttUtilizedNode, err = podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
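The two hunks above also show the listing API moving off the API server: the old podutil.ListPodsOnANode took a context, a clientset, and a node object, while the new one takes a node name, the cached getPodsAssignedToNode lookup, and a filter built up front via podutil.NewOptions().WithFilter(...).BuildFilterFunc(). A short usage sketch of the new shape, with the call signature inferred from these call sites rather than quoted from the podutil package (podsOnNode is an illustrative variable name):

// Build the evictability filter once per test run.
podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
if err != nil {
	t.Errorf("Error initializing pod filter function, %v", err)
}

// List a node's pods from the informer-backed index instead of querying the API server.
podsOnNode, err := podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
if err != nil {
	t.Errorf("Error listing pods on a node %v", err)
}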
@@ -339,7 +358,7 @@ func TestLowNodeUtilization(t *testing.T) {
func TestNamespaceConstraintsInclude(t *testing.T) {
ctx := context.Background()
-clientSet, nodeInformer, stopCh := initializeClient(t)
+clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -374,7 +393,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) {
t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
Include: []string{rc.Namespace},
}, "", nil, false, nil)
}, "", nil, false, nil, getPodsAssignedToNode)
// All pods are supposed to be deleted, wait until all the old pods are deleted
if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
@@ -410,7 +429,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) {
func TestNamespaceConstraintsExclude(t *testing.T) {
ctx := context.Background()
-clientSet, nodeInformer, stopCh := initializeClient(t)
+clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -445,7 +464,7 @@ func TestNamespaceConstraintsExclude(t *testing.T) {
t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
Exclude: []string{rc.Namespace},
}, "", nil, false, nil)
}, "", nil, false, nil, getPodsAssignedToNode)
t.Logf("Waiting 10s")
time.Sleep(10 * time.Second)
@@ -477,7 +496,7 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
var lowPriority = int32(500)
ctx := context.Background()
-clientSet, nodeInformer, stopCh := initializeClient(t)
+clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -558,9 +577,9 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
t.Logf("Existing pods: %v", initialPodNames)
if isPriorityClass {
-runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil)
+runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil, getPodsAssignedToNode)
} else {
-runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil)
+runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil, getPodsAssignedToNode)
}
// All pods are supposed to be deleted, wait until all pods in the test namespace are terminating
@@ -607,7 +626,7 @@ func testPriority(t *testing.T, isPriorityClass bool) {
var lowPriority = int32(500)
ctx := context.Background()
-clientSet, nodeInformer, stopCh := initializeClient(t)
+clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -677,10 +696,10 @@ func testPriority(t *testing.T, isPriorityClass bool) {
if isPriorityClass {
t.Logf("set the strategy to delete pods with priority lower than priority class %s", highPriorityClass.Name)
-runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil)
+runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil, getPodsAssignedToNode)
} else {
t.Logf("set the strategy to delete pods with priority lower than %d", highPriority)
-runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil)
+runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil, getPodsAssignedToNode)
}
t.Logf("Waiting 10s")
@@ -736,7 +755,7 @@ func testPriority(t *testing.T, isPriorityClass bool) {
func TestPodLabelSelector(t *testing.T) {
ctx := context.Background()
-clientSet, nodeInformer, stopCh := initializeClient(t)
+clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -784,7 +803,7 @@ func TestPodLabelSelector(t *testing.T) {
t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames)
t.Logf("set the strategy to delete pods with label test:podlifetime-evict")
-runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}})
+runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode)
t.Logf("Waiting 10s")
time.Sleep(10 * time.Second)
@@ -839,7 +858,7 @@ func TestPodLabelSelector(t *testing.T) {
func TestEvictAnnotation(t *testing.T) {
ctx := context.Background()
-clientSet, nodeInformer, stopCh := initializeClient(t)
+clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -894,7 +913,7 @@ func TestEvictAnnotation(t *testing.T) {
t.Logf("Existing pods: %v", initialPodNames)
t.Log("Running PodLifetime strategy")
-runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil)
+runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil, getPodsAssignedToNode)
if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})


@@ -37,7 +37,7 @@ import (
func TestTooManyRestarts(t *testing.T) {
ctx := context.Background()
-clientSet, _, stopCh := initializeClient(t)
+clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -157,6 +157,7 @@ func TestTooManyRestarts(t *testing.T) {
},
workerNodes,
podEvictor,
+getPodsAssignedToNode,
)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)


@@ -18,7 +18,7 @@ const zoneTopologyKey string = "topology.kubernetes.io/zone"
func TestTopologySpreadConstraint(t *testing.T) {
ctx := context.Background()
-clientSet, _, stopCh := initializeClient(t)
+clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
@@ -92,6 +92,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
nodes,
podEvictor,
+getPodsAssignedToNode,
)
t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)