mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 21:31:18 +01:00

Compare commits


8 Commits

Author | SHA1 | Message | Date
------ | ---- | ------- | ----
Kubernetes Prow Robot | f79131d0af | Merge pull request #745 from damemi/update-go-version-122 ([release-1.22] Bump Go to 1.16.14) | 2022-03-03 01:40:49 -08:00
Mike Dame | ef85c0fff1 | Bump Go to 1.16.14 | 2022-02-25 20:49:51 +00:00
Kubernetes Prow Robot | f5538db9d2 | Merge pull request #634 from a7i/release-1.22.1 (Release 1.22.1) | 2021-09-29 09:36:49 -07:00
Amir Alavi | 12e6bf931d | Update Docs, Manifests, and Helm Chart version to 0.22.1 | 2021-09-28 10:55:01 -04:00
Amir Alavi | 41fca2ed61 | e2e tests for RemoveFailedPods strategy; fix priority class default | 2021-09-28 10:54:50 -04:00
Amir Alavi | 8e1da96082 | e2e TestTopologySpreadConstraint: ensure pods are running before checking for topology spread across domains | 2021-09-28 10:51:49 -04:00
Amir Alavi | 27fec39c65 | RemoveFailedPods: guard against nil descheduler strategy (e.g. in case of default that loads all strategies) | 2021-09-28 10:51:49 -04:00
Kubernetes Prow Robot | 6bfc76b6b2 | Merge pull request #624 from damemi/release-1.22 ([release-1.22] Update Helm chart version to 0.22.0) | 2021-09-08 13:43:59 -07:00
21 changed files with 222 additions and 57 deletions

View File

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM golang:1.16.7
+FROM golang:1.16.14
 WORKDIR /go/src/sigs.k8s.io/descheduler
 COPY . .

View File

@@ -103,17 +103,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
 Run As A Job
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.22.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.22.1' | kubectl apply -f -
 ```
 Run As A CronJob
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.22.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.22.1' | kubectl apply -f -
 ```
 Run As A Deployment
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.22.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.22.1' | kubectl apply -f -
 ```
 ## User Guide

View File

@@ -1,7 +1,7 @@
 apiVersion: v1
 name: descheduler
-version: 0.22.0
-appVersion: 0.22.0
+version: 0.22.1
+appVersion: 0.22.1
 description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
 keywords:
 - kubernetes

View File

@@ -4,6 +4,7 @@ Starting with descheduler release v0.10.0 container images are available in the
 Descheduler Version | Container Image | Architectures |
 ------------------- |-----------------------------------------------------|-------------------------|
+v0.22.1 | k8s.gcr.io/descheduler/descheduler:v0.22.1 | AMD64<br>ARM64<br>ARMv7 |
 v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
 v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
 v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |

View File

@@ -16,7 +16,7 @@ spec:
 priorityClassName: system-cluster-critical
 containers:
 - name: descheduler
-  image: k8s.gcr.io/descheduler/descheduler:v0.22.0
+  image: k8s.gcr.io/descheduler/descheduler:v0.22.1
   volumeMounts:
   - mountPath: /policy-dir
     name: policy-volume

View File

@@ -19,7 +19,7 @@ spec:
 serviceAccountName: descheduler-sa
 containers:
 - name: descheduler
-  image: k8s.gcr.io/descheduler/descheduler:v0.22.0
+  image: k8s.gcr.io/descheduler/descheduler:v0.22.1
   imagePullPolicy: IfNotPresent
   command:
   - "/bin/descheduler"

View File

@@ -14,7 +14,7 @@ spec:
 priorityClassName: system-cluster-critical
 containers:
 - name: descheduler
-  image: k8s.gcr.io/descheduler/descheduler:v0.22.0
+  image: k8s.gcr.io/descheduler/descheduler:v0.22.1
   volumeMounts:
   - mountPath: /policy-dir
     name: policy-volume

View File

@@ -1,7 +1,7 @@
 // +build !ignore_autogenerated
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

View File

@@ -1,7 +1,7 @@
 // +build !ignore_autogenerated
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

View File

@@ -1,7 +1,7 @@
 // +build !ignore_autogenerated
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

View File

@@ -1,7 +1,7 @@
 // +build !ignore_autogenerated
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

View File

@@ -1,7 +1,7 @@
 // +build !ignore_autogenerated
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

View File

@@ -1,7 +1,7 @@
 // +build !ignore_autogenerated
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

View File

@@ -1,7 +1,7 @@
 // +build !ignore_autogenerated
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

View File

@@ -1,7 +1,7 @@
 // +build !ignore_autogenerated
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

View File

@@ -19,7 +19,7 @@ import (
 // validatedFailedPodsStrategyParams contains validated strategy parameters
 type validatedFailedPodsStrategyParams struct {
-    *validation.ValidatedStrategyParams
+    validation.ValidatedStrategyParams
     includingInitContainers bool
     reasons sets.String
     excludeOwnerKinds sets.String
@@ -46,9 +46,15 @@ func RemoveFailedPods(
         evictions.WithLabelSelector(strategyParams.LabelSelector),
     )
+    var labelSelector *metav1.LabelSelector
+    if strategy.Params != nil {
+        labelSelector = strategy.Params.LabelSelector
+    }
     for _, node := range nodes {
         klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
         fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase=" + string(v1.PodFailed)
         pods, err := podutil.ListPodsOnANodeWithFieldSelector(
             ctx,
             client,
@@ -57,7 +63,7 @@ func RemoveFailedPods(
             podutil.WithFilter(evictable.IsEvictable),
             podutil.WithNamespaces(strategyParams.IncludedNamespaces.UnsortedList()),
             podutil.WithoutNamespaces(strategyParams.ExcludedNamespaces.UnsortedList()),
-            podutil.WithLabelSelector(strategy.Params.LabelSelector),
+            podutil.WithLabelSelector(labelSelector),
         )
         if err != nil {
             klog.ErrorS(err, "Error listing a nodes failed pods", "node", klog.KObj(node))
@@ -84,7 +90,9 @@ func validateAndParseRemoveFailedPodsParams(
     params *api.StrategyParameters,
 ) (*validatedFailedPodsStrategyParams, error) {
     if params == nil {
-        return &validatedFailedPodsStrategyParams{}, nil
+        return &validatedFailedPodsStrategyParams{
+            ValidatedStrategyParams: validation.DefaultValidatedStrategyParams(),
+        }, nil
     }
     strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, params)
@@ -103,7 +111,7 @@ func validateAndParseRemoveFailedPodsParams(
     }
     return &validatedFailedPodsStrategyParams{
-        ValidatedStrategyParams: strategyParams,
+        ValidatedStrategyParams: *strategyParams,
         includingInitContainers: includingInitContainers,
         reasons: reasons,
         excludeOwnerKinds: excludeOwnerKinds,
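
The two changes above target the same failure mode: with no strategy parameters configured, the embedded `*validation.ValidatedStrategyParams` stayed nil and `strategy.Params.LabelSelector` was dereferenced unconditionally. The standalone sketch below (illustrative names only, not code from this repository) shows why a nil embedded pointer panics on promoted-field access while a value embed seeded with defaults does not:

```go
package main

import "fmt"

// Stand-ins for validation.ValidatedStrategyParams and the strategy wrapper
// touched in this diff; the names here are illustrative, not from the repo.
type validatedParams struct {
    ThresholdPriority int32
}

type withPointerEmbed struct {
    *validatedParams // stays nil when no params are supplied
}

type withValueEmbed struct {
    validatedParams // always usable; seeded from a default constructor
}

func main() {
    // Nil embedded pointer: reading a promoted field dereferences nil and panics.
    broken := withPointerEmbed{}
    func() {
        defer func() {
            if r := recover(); r != nil {
                fmt.Println("pointer embed panicked:", r)
            }
        }()
        fmt.Println(broken.ThresholdPriority)
    }()

    // Value embed with a default (SystemCriticalPriority is 2000000000 upstream):
    // the "no params" path now returns something safe to read.
    fixed := withValueEmbed{validatedParams: validatedParams{ThresholdPriority: 2000000000}}
    fmt.Println("default threshold:", fixed.ThresholdPriority)
}
```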

View File

@@ -44,6 +44,13 @@ func TestRemoveFailedPods(t *testing.T) {
         expectedEvictedPodCount int
         pods []v1.Pod
     }{
+        {
+            description: "default empty strategy, 0 failures, 0 evictions",
+            strategy: api.DeschedulerStrategy{},
+            nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
+            expectedEvictedPodCount: 0,
+            pods: []v1.Pod{}, // no pods come back with field selector phase=Failed
+        },
         {
             description: "0 failures, 0 evictions",
             strategy: createStrategy(true, false, nil, nil, nil, false),
@@ -225,6 +232,7 @@ func TestValidRemoveFailedPodsParams(t *testing.T) {
         params *api.StrategyParameters
     }{
         {name: "validate nil params", params: nil},
+        {name: "validate empty params", params: &api.StrategyParameters{}},
         {name: "validate reasons params", params: &api.StrategyParameters{FailedPods: &api.FailedPods{
             Reasons: []string{"CreateContainerConfigError"},
         }}},

View File

@@ -22,20 +22,22 @@ type ValidatedStrategyParams struct {
     NodeFit bool
 }
+func DefaultValidatedStrategyParams() ValidatedStrategyParams {
+    return ValidatedStrategyParams{ThresholdPriority: utils.SystemCriticalPriority}
+}
 func ValidateAndParseStrategyParams(
     ctx context.Context,
     client clientset.Interface,
     params *api.StrategyParameters,
 ) (*ValidatedStrategyParams, error) {
-    var includedNamespaces, excludedNamespaces sets.String
     if params == nil {
-        return &ValidatedStrategyParams{
-            IncludedNamespaces: includedNamespaces,
-            ExcludedNamespaces: excludedNamespaces,
-        }, nil
+        defaultValidatedStrategyParams := DefaultValidatedStrategyParams()
+        return &defaultValidatedStrategyParams, nil
     }
     // At most one of include/exclude can be set
+    var includedNamespaces, excludedNamespaces sets.String
     if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
         return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set")
     }
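
With `DefaultValidatedStrategyParams()` in place, a strategy configured with no parameters defaults its eviction threshold to the system-critical priority instead of a zero value. Roughly, and as an illustrative check rather than the repository's exact eviction filter, that default behaves like this:

```go
package main

import "fmt"

// Illustrative check only: with ThresholdPriority defaulted to the
// system-critical priority, system-critical pods are never treated as
// eviction candidates even when the strategy runs with no params at all.
const defaultThresholdPriority int32 = 2000000000 // SystemCriticalPriority upstream

func evictableByPriority(podPriority int32) bool {
    return podPriority < defaultThresholdPriority
}

func main() {
    fmt.Println(evictableByPriority(0))          // true: ordinary workload pod
    fmt.Println(evictableByPriority(2000001000)) // false: system-node-critical pod
}
```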

View File

@@ -0,0 +1,154 @@
package e2e

import (
    "context"

    batchv1 "k8s.io/api/batch/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"

    "strings"
    "testing"
    "time"

    deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)

var oneHourPodLifetimeSeconds uint = 3600

func TestFailedPods(t *testing.T) {
    ctx := context.Background()

    clientSet, _, stopCh := initializeClient(t)
    defer close(stopCh)

    nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
    if err != nil {
        t.Errorf("Error listing node with %v", err)
    }
    nodes, _ := splitNodesAndWorkerNodes(nodeList.Items)

    t.Log("Creating testing namespace")
    testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
    if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
        t.Fatalf("Unable to create ns %v", testNamespace.Name)
    }
    defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})

    testCases := map[string]struct {
        expectedEvictedCount int
        strategyParams *deschedulerapi.StrategyParameters
    }{
        "test-failed-pods-nil-strategy": {
            expectedEvictedCount: 1,
            strategyParams: nil,
        },
        "test-failed-pods-default-strategy": {
            expectedEvictedCount: 1,
            strategyParams: &deschedulerapi.StrategyParameters{},
        },
        "test-failed-pods-default-failed-pods": {
            expectedEvictedCount: 1,
            strategyParams: &deschedulerapi.StrategyParameters{
                FailedPods: &deschedulerapi.FailedPods{},
            },
        },
        "test-failed-pods-reason-unmatched": {
            expectedEvictedCount: 0,
            strategyParams: &deschedulerapi.StrategyParameters{
                FailedPods: &deschedulerapi.FailedPods{Reasons: []string{"ReasonDoesNotMatch"}},
            },
        },
        "test-failed-pods-min-age-unmet": {
            expectedEvictedCount: 0,
            strategyParams: &deschedulerapi.StrategyParameters{
                FailedPods: &deschedulerapi.FailedPods{MinPodLifetimeSeconds: &oneHourPodLifetimeSeconds},
            },
        },
        "test-failed-pods-exclude-job-kind": {
            expectedEvictedCount: 0,
            strategyParams: &deschedulerapi.StrategyParameters{
                FailedPods: &deschedulerapi.FailedPods{ExcludeOwnerKinds: []string{"Job"}},
            },
        },
    }
    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            job := initFailedJob(name, testNamespace.Namespace)
            t.Logf("Creating job %s in %s namespace", job.Name, job.Namespace)
            jobClient := clientSet.BatchV1().Jobs(testNamespace.Name)
            if _, err := jobClient.Create(ctx, job, metav1.CreateOptions{}); err != nil {
                t.Fatalf("Error creating Job %s: %v", name, err)
            }
            deletePropagationPolicy := metav1.DeletePropagationForeground
            defer jobClient.Delete(ctx, job.Name, metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy})
            waitForJobPodPhase(ctx, t, clientSet, job, v1.PodFailed)

            podEvictor := initPodEvictorOrFail(t, clientSet, nodes)

            t.Logf("Running RemoveFailedPods strategy for %s", name)
            strategies.RemoveFailedPods(
                ctx,
                clientSet,
                deschedulerapi.DeschedulerStrategy{
                    Enabled: true,
                    Params: tc.strategyParams,
                },
                nodes,
                podEvictor,
            )
            t.Logf("Finished RemoveFailedPods strategy for %s", name)

            if actualEvictedCount := podEvictor.TotalEvicted(); actualEvictedCount == tc.expectedEvictedCount {
                t.Logf("Total of %d Pods were evicted for %s", actualEvictedCount, name)
            } else {
                t.Errorf("Unexpected number of pods have been evicted, got %v, expected %v", actualEvictedCount, tc.expectedEvictedCount)
            }
        })
    }
}

func initFailedJob(name, namespace string) *batchv1.Job {
    podSpec := MakePodSpec("", nil)
    podSpec.Containers[0].Command = []string{"/bin/false"}
    podSpec.RestartPolicy = v1.RestartPolicyNever
    labelsSet := labels.Set{"test": name, "name": name}
    jobBackoffLimit := int32(0)
    return &batchv1.Job{
        ObjectMeta: metav1.ObjectMeta{
            Labels: labelsSet,
            Name: name,
            Namespace: namespace,
        },
        Spec: batchv1.JobSpec{
            Template: v1.PodTemplateSpec{
                Spec: podSpec,
                ObjectMeta: metav1.ObjectMeta{Labels: labelsSet},
            },
            BackoffLimit: &jobBackoffLimit,
        },
    }
}

func waitForJobPodPhase(ctx context.Context, t *testing.T, clientSet clientset.Interface, job *batchv1.Job, phase v1.PodPhase) {
    podClient := clientSet.CoreV1().Pods(job.Namespace)
    if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
        t.Log(labels.FormatLabels(job.Labels))
        if podList, err := podClient.List(ctx, metav1.ListOptions{LabelSelector: labels.FormatLabels(job.Labels)}); err != nil {
            return false, err
        } else {
            if len(podList.Items) == 0 {
                t.Logf("Job controller has not created Pod for job %s yet", job.Name)
                return false, nil
            }
            for _, pod := range podList.Items {
                if pod.Status.Phase != phase {
                    t.Logf("Pod %v not in %s phase yet, is %v instead", pod.Name, phase, pod.Status.Phase)
                    return false, nil
                }
            }
            t.Logf("Job %v Pod is in %s phase now", job.Name, phase)
            return true, nil
        }
    }); err != nil {
        t.Fatalf("Error waiting for pods in %s phase: %v", phase, err)
    }
}

View File

@@ -290,20 +290,7 @@ func TestLowNodeUtilization(t *testing.T) {
     waitForRCPodsRunning(ctx, t, clientSet, rc)
     // Run LowNodeUtilization strategy
-    evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
-    if err != nil || len(evictionPolicyGroupVersion) == 0 {
-        t.Fatalf("%v", err)
-    }
-    podEvictor := evictions.NewPodEvictor(
-        clientSet,
-        evictionPolicyGroupVersion,
-        false,
-        0,
-        nodes,
-        true,
-        false,
-        false,
-    )
+    podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
     podsOnMosttUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, workerNodes[0], podutil.WithFilter(podEvictor.Evictable().IsEvictable))
     if err != nil {
@@ -1293,3 +1280,20 @@ func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
     }
     return allNodes, workerNodes
 }
+func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, nodes []*v1.Node) *evictions.PodEvictor {
+    evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
+    if err != nil || len(evictionPolicyGroupVersion) == 0 {
+        t.Fatalf("Error creating eviction policy group: %v", err)
+    }
+    return evictions.NewPodEvictor(
+        clientSet,
+        evictionPolicyGroupVersion,
+        false,
+        0,
+        nodes,
+        true,
+        false,
+        false,
+    )
+}

View File

@@ -11,8 +11,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
-    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
-    eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
     "sigs.k8s.io/descheduler/pkg/descheduler/strategies"
 )
@@ -79,22 +77,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
         defer deleteRC(ctx, t, clientSet, violatorRc)
         waitForRCPodsRunning(ctx, t, clientSet, violatorRc)
-        // Run TopologySpreadConstraint strategy
-        evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
-        if err != nil || len(evictionPolicyGroupVersion) == 0 {
-            t.Fatalf("Error creating eviction policy group for %s: %v", name, err)
-        }
-        podEvictor := evictions.NewPodEvictor(
-            clientSet,
-            evictionPolicyGroupVersion,
-            false,
-            0,
-            nodes,
-            true,
-            false,
-            false,
-        )
+        podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
+        // Run TopologySpreadConstraint strategy
         t.Logf("Running RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
         strategies.RemovePodsViolatingTopologySpreadConstraint(
             ctx,
@@ -119,6 +104,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
             t.Fatalf("Pods were not evicted for %s TopologySpreadConstraint", name)
         }
+        // Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
+        waitForRCPodsRunning(ctx, t, clientSet, rc)
         pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", tc.labelKey, tc.labelValue)})
         if err != nil {
             t.Errorf("Error listing pods for %s: %v", name, err)