mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-25 20:59:28 +01:00

e2e tests for RemoveFailedPods strategy

Fix priority class default
@@ -90,7 +90,9 @@ func validateAndParseRemoveFailedPodsParams(
 	params *api.StrategyParameters,
 ) (*validatedFailedPodsStrategyParams, error) {
 	if params == nil {
-		return &validatedFailedPodsStrategyParams{}, nil
+		return &validatedFailedPodsStrategyParams{
+			ValidatedStrategyParams: validation.DefaultValidatedStrategyParams(),
+		}, nil
 	}

 	strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, params)
@@ -22,20 +22,22 @@ type ValidatedStrategyParams struct {
 	NodeFit bool
 }

+func DefaultValidatedStrategyParams() ValidatedStrategyParams {
+	return ValidatedStrategyParams{ThresholdPriority: utils.SystemCriticalPriority}
+}
+
 func ValidateAndParseStrategyParams(
 	ctx context.Context,
 	client clientset.Interface,
 	params *api.StrategyParameters,
 ) (*ValidatedStrategyParams, error) {
-	var includedNamespaces, excludedNamespaces sets.String
 	if params == nil {
-		return &ValidatedStrategyParams{
-			IncludedNamespaces: includedNamespaces,
-			ExcludedNamespaces: excludedNamespaces,
-		}, nil
+		defaultValidatedStrategyParams := DefaultValidatedStrategyParams()
+		return &defaultValidatedStrategyParams, nil
 	}

 	// At most one of include/exclude can be set
+	var includedNamespaces, excludedNamespaces sets.String
 	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
 		return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set")
 	}
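Why this matters: with nil strategy parameters, the strategy previously got a zero-valued struct back, leaving ThresholdPriority at 0 instead of utils.SystemCriticalPriority. Below is a minimal sketch of the effect, assuming the eviction gate behaves like descheduler's priority check (a pod is evictable only when its priority is strictly below the threshold); the constant value and helper are illustrative stand-ins, not the project's API.

package main

import "fmt"

// systemCriticalPriority mirrors the value behind descheduler's
// utils.SystemCriticalPriority (the Kubernetes system-critical
// priority class value).
const systemCriticalPriority int32 = 2000000000

// evictableByPriority is an illustrative stand-in for the priority gate:
// only pods with priority strictly below the threshold may be evicted.
func evictableByPriority(podPriority, threshold int32) bool {
	return podPriority < threshold
}

func main() {
	appPod := int32(100) // an ordinary workload pod

	// Old behavior: nil params left ThresholdPriority at its zero value,
	// so even ordinary pods counted as protected and nothing was evicted.
	fmt.Println(evictableByPriority(appPod, 0)) // false

	// New behavior: the default is SystemCriticalPriority, so only
	// system-critical pods are protected.
	fmt.Println(evictableByPriority(appPod, systemCriticalPriority)) // true
}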
154	test/e2e/e2e_failedpods_test.go	Normal file
@@ -0,0 +1,154 @@
package e2e

import (
	"context"
	"strings"
	"testing"
	"time"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"

	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)

var oneHourPodLifetimeSeconds uint = 3600

func TestFailedPods(t *testing.T) {
	ctx := context.Background()
	clientSet, _, stopCh := initializeClient(t)
	defer close(stopCh)
	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		t.Errorf("Error listing nodes: %v", err)
	}
	nodes, _ := splitNodesAndWorkerNodes(nodeList.Items)
	t.Log("Creating testing namespace")
	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Unable to create namespace %v", testNamespace.Name)
	}
	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
	testCases := map[string]struct {
		expectedEvictedCount int
		strategyParams       *deschedulerapi.StrategyParameters
	}{
		"test-failed-pods-nil-strategy": {
			expectedEvictedCount: 1,
			strategyParams:       nil,
		},
		"test-failed-pods-default-strategy": {
			expectedEvictedCount: 1,
			strategyParams:       &deschedulerapi.StrategyParameters{},
		},
		"test-failed-pods-default-failed-pods": {
			expectedEvictedCount: 1,
			strategyParams: &deschedulerapi.StrategyParameters{
				FailedPods: &deschedulerapi.FailedPods{},
			},
		},
		"test-failed-pods-reason-unmatched": {
			expectedEvictedCount: 0,
			strategyParams: &deschedulerapi.StrategyParameters{
				FailedPods: &deschedulerapi.FailedPods{Reasons: []string{"ReasonDoesNotMatch"}},
			},
		},
		"test-failed-pods-min-age-unmet": {
			expectedEvictedCount: 0,
			strategyParams: &deschedulerapi.StrategyParameters{
				FailedPods: &deschedulerapi.FailedPods{MinPodLifetimeSeconds: &oneHourPodLifetimeSeconds},
			},
		},
		"test-failed-pods-exclude-job-kind": {
			expectedEvictedCount: 0,
			strategyParams: &deschedulerapi.StrategyParameters{
				FailedPods: &deschedulerapi.FailedPods{ExcludeOwnerKinds: []string{"Job"}},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// Use the namespace's Name; a Namespace object's own Namespace
			// field is always empty.
			job := initFailedJob(name, testNamespace.Name)
			t.Logf("Creating job %s in %s namespace", job.Name, job.Namespace)
			jobClient := clientSet.BatchV1().Jobs(testNamespace.Name)
			if _, err := jobClient.Create(ctx, job, metav1.CreateOptions{}); err != nil {
				t.Fatalf("Error creating Job %s: %v", name, err)
			}
			deletePropagationPolicy := metav1.DeletePropagationForeground
			defer jobClient.Delete(ctx, job.Name, metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy})
			waitForJobPodPhase(ctx, t, clientSet, job, v1.PodFailed)

			podEvictor := initPodEvictorOrFail(t, clientSet, nodes)

			t.Logf("Running RemoveFailedPods strategy for %s", name)
			strategies.RemoveFailedPods(
				ctx,
				clientSet,
				deschedulerapi.DeschedulerStrategy{
					Enabled: true,
					Params:  tc.strategyParams,
				},
				nodes,
				podEvictor,
			)
			t.Logf("Finished RemoveFailedPods strategy for %s", name)

			if actualEvictedCount := podEvictor.TotalEvicted(); actualEvictedCount == tc.expectedEvictedCount {
				t.Logf("Total of %d Pods were evicted for %s", actualEvictedCount, name)
			} else {
				t.Errorf("Unexpected number of pods evicted: got %v, expected %v", actualEvictedCount, tc.expectedEvictedCount)
			}
		})
	}
}

func initFailedJob(name, namespace string) *batchv1.Job {
	podSpec := MakePodSpec("", nil)
	// A container that always fails, with no restarts and no retries,
	// so the Job's pod reliably ends up in the Failed phase.
	podSpec.Containers[0].Command = []string{"/bin/false"}
	podSpec.RestartPolicy = v1.RestartPolicyNever
	labelsSet := labels.Set{"test": name, "name": name}
	jobBackoffLimit := int32(0)
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Labels:    labelsSet,
			Name:      name,
			Namespace: namespace,
		},
		Spec: batchv1.JobSpec{
			Template: v1.PodTemplateSpec{
				Spec:       podSpec,
				ObjectMeta: metav1.ObjectMeta{Labels: labelsSet},
			},
			BackoffLimit: &jobBackoffLimit,
		},
	}
}

func waitForJobPodPhase(ctx context.Context, t *testing.T, clientSet clientset.Interface, job *batchv1.Job, phase v1.PodPhase) {
	podClient := clientSet.CoreV1().Pods(job.Namespace)
	if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
		t.Log(labels.FormatLabels(job.Labels))
		podList, err := podClient.List(ctx, metav1.ListOptions{LabelSelector: labels.FormatLabels(job.Labels)})
		if err != nil {
			return false, err
		}
		if len(podList.Items) == 0 {
			t.Logf("Job controller has not created a Pod for job %s yet", job.Name)
			return false, nil
		}
		for _, pod := range podList.Items {
			if pod.Status.Phase != phase {
				t.Logf("Pod %v is not in the %s phase yet, is %v instead", pod.Name, phase, pod.Status.Phase)
				return false, nil
			}
		}
		t.Logf("Job %v Pods are in the %s phase now", job.Name, phase)
		return true, nil
	}); err != nil {
		t.Fatalf("Error waiting for pods in %s phase: %v", phase, err)
	}
}
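For context on the first two test cases above: nil or empty strategy parameters correspond to a user enabling RemoveFailedPods with no configuration, which is exactly where the new default applies. A sketch of such a policy built against the in-tree API (a hypothetical construction for illustration, assuming the v1alpha1-era DeschedulerPolicy and StrategyList types from sigs.k8s.io/descheduler/pkg/api):

package main

import (
	"fmt"

	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
)

func main() {
	// RemoveFailedPods enabled with no Params: after this commit the
	// strategy falls back to DefaultValidatedStrategyParams, i.e.
	// ThresholdPriority = SystemCriticalPriority rather than 0.
	policy := &deschedulerapi.DeschedulerPolicy{
		Strategies: deschedulerapi.StrategyList{
			"RemoveFailedPods": deschedulerapi.DeschedulerStrategy{Enabled: true},
		},
	}
	fmt.Printf("%+v\n", policy.Strategies["RemoveFailedPods"])
}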
@@ -290,20 +290,7 @@ func TestLowNodeUtilization(t *testing.T) {
 	waitForRCPodsRunning(ctx, t, clientSet, rc)

 	// Run LowNodeUtilization strategy
-	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
-	if err != nil || len(evictionPolicyGroupVersion) == 0 {
-		t.Fatalf("%v", err)
-	}
-	podEvictor := evictions.NewPodEvictor(
-		clientSet,
-		evictionPolicyGroupVersion,
-		false,
-		0,
-		nodes,
-		true,
-		false,
-		false,
-	)
+	podEvictor := initPodEvictorOrFail(t, clientSet, nodes)

 	podsOnMostUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, workerNodes[0], podutil.WithFilter(podEvictor.Evictable().IsEvictable))
 	if err != nil {
@@ -1293,3 +1280,20 @@ func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
 	}
 	return allNodes, workerNodes
 }
+
+func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, nodes []*v1.Node) *evictions.PodEvictor {
+	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
+	if err != nil || len(evictionPolicyGroupVersion) == 0 {
+		t.Fatalf("Error creating eviction policy group: %v", err)
+	}
+	return evictions.NewPodEvictor(
+		clientSet,
+		evictionPolicyGroupVersion,
+		false,
+		0,
+		nodes,
+		true,
+		false,
+		false,
+	)
+}
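The new initPodEvictorOrFail helper consolidates the eutils.SupportEviction discovery and evictions.NewPodEvictor construction that each e2e test previously inlined; the hunks above and below swap those inlined blocks for a single call.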
@@ -11,8 +11,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
-	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
-	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
 )

@@ -79,22 +77,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
 	defer deleteRC(ctx, t, clientSet, violatorRc)
 	waitForRCPodsRunning(ctx, t, clientSet, violatorRc)

-	// Run TopologySpreadConstraint strategy
-	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
-	if err != nil || len(evictionPolicyGroupVersion) == 0 {
-		t.Fatalf("Error creating eviction policy group for %s: %v", name, err)
-	}
-	podEvictor := evictions.NewPodEvictor(
-		clientSet,
-		evictionPolicyGroupVersion,
-		false,
-		0,
-		nodes,
-		true,
-		false,
-		false,
-	)
+	podEvictor := initPodEvictorOrFail(t, clientSet, nodes)

+	// Run TopologySpreadConstraint strategy
 	t.Logf("Running RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
 	strategies.RemovePodsViolatingTopologySpreadConstraint(
 		ctx,