[e2e] abstract common methods
(commit to https://github.com/kubernetes-sigs/descheduler.git)
@@ -23,7 +23,6 @@ import (
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
 	frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
-	"sigs.k8s.io/descheduler/test"
 )

 var oneHourPodLifetimeSeconds uint = 3600
@@ -134,7 +133,7 @@ func TestFailedPods(t *testing.T) {
 }

 func initFailedJob(name, namespace string) *batchv1.Job {
-	podSpec := test.MakePodSpec("", nil)
+	podSpec := makePodSpec("", nil)
 	podSpec.Containers[0].Command = []string{"/bin/false"}
 	podSpec.RestartPolicy = v1.RestartPolicyNever
 	labelsSet := labels.Set{"test": name, "name": name}
@@ -63,21 +63,27 @@ func TestLeaderElection(t *testing.T) {
 	if err != nil {
 		t.Fatalf("create deployment 1: %v", err)
 	}
-	defer clientSet.AppsV1().Deployments(deployment1.Namespace).Delete(ctx, deployment1.Name, metav1.DeleteOptions{})
+	defer func() {
+		clientSet.AppsV1().Deployments(deployment1.Namespace).Delete(ctx, deployment1.Name, metav1.DeleteOptions{})
+		waitForPodsToDisappear(ctx, t, clientSet, deployment1.Labels, deployment1.Namespace)
+	}()

 	deployment2, err := createDeployment(ctx, clientSet, ns2, 5, t)
 	if err != nil {
 		t.Fatalf("create deployment 2: %v", err)
 	}
-	defer clientSet.AppsV1().Deployments(deployment2.Namespace).Delete(ctx, deployment2.Name, metav1.DeleteOptions{})
+	defer func() {
+		clientSet.AppsV1().Deployments(deployment2.Namespace).Delete(ctx, deployment2.Name, metav1.DeleteOptions{})
+		waitForPodsToDisappear(ctx, t, clientSet, deployment2.Labels, deployment2.Namespace)
+	}()

 	waitForPodsRunning(ctx, t, clientSet, map[string]string{"test": "leaderelection", "name": "test-leaderelection"}, 5, ns1)

-	podListAOrg := getPodNameList(ctx, clientSet, ns1, t)
+	podListAOrg := getCurrentPodNames(ctx, clientSet, ns1, t)

 	waitForPodsRunning(ctx, t, clientSet, map[string]string{"test": "leaderelection", "name": "test-leaderelection"}, 5, ns2)

-	podListBOrg := getPodNameList(ctx, clientSet, ns2, t)
+	podListBOrg := getCurrentPodNames(ctx, clientSet, ns2, t)

 	s1, err := options.NewDeschedulerServer()
 	if err != nil {
@@ -141,9 +147,9 @@ func TestLeaderElection(t *testing.T) {
 	time.Sleep(7 * time.Second)

 	// validate only pods from e2e-testleaderelection-a namespace are evicted.
-	podListA := getPodNameList(ctx, clientSet, ns1, t)
+	podListA := getCurrentPodNames(ctx, clientSet, ns1, t)

-	podListB := getPodNameList(ctx, clientSet, ns2, t)
+	podListB := getCurrentPodNames(ctx, clientSet, ns2, t)

 	left := reflect.DeepEqual(podListAOrg, podListA)
 	right := reflect.DeepEqual(podListBOrg, podListB)
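Note: the two `defer` rewrites above share one motif: `Delete` only asks the API server to start removal, so a bare deferred delete lets the next test begin while pods are still terminating. The commit therefore pairs every delete with a wait on the pods' labels. A minimal self-contained sketch of that delete-then-wait idea (the helper name below is illustrative, not part of the commit, and it skips the throttling tolerance the real helpers have):

```go
package e2e

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// deleteDeploymentAndWait issues the delete, then polls until no pods carrying
// the deployment's labels remain, mirroring the deferred cleanup added above.
func deleteDeploymentAndWait(ctx context.Context, c kubernetes.Interface, ns, name string, podLabels map[string]string) error {
	if err := c.AppsV1().Deployments(ns).Delete(ctx, name, metav1.DeleteOptions{}); err != nil {
		return fmt.Errorf("delete deployment %s: %w", name, err)
	}
	selector := labels.SelectorFromSet(podLabels).String()
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
		pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			return false, err
		}
		return len(pods.Items) == 0, nil // done once every labeled pod is gone
	})
}
```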
@@ -222,16 +228,3 @@ func createDeployment(ctx context.Context, clientSet clientset.Interface, namesp
 	}
 	return deployment, nil
 }
-
-func getPodNameList(ctx context.Context, clientSet clientset.Interface, namespace string, t *testing.T) []string {
-	podList, err := clientSet.CoreV1().Pods(namespace).List(
-		ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "leaderelection", "name": "test-leaderelection"})).String()})
-	if err != nil {
-		t.Fatalf("Unable to list pods from ns: %s: %v", namespace, err)
-	}
-	podNames := make([]string, len(podList.Items))
-	for i, pod := range podList.Items {
-		podNames[i] = pod.Name
-	}
-	return podNames
-}
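Note: `getPodNameList` was a leader-election-only copy of the shared `getCurrentPodNames` (reworked further down in this diff), with its label selector hardcoded; the caller only needs name snapshots to compare with `reflect.DeepEqual`. A tiny illustration of that comparison with made-up pod names (order matters: `DeepEqual` on slices is order-sensitive, and the names come back in list order):

```go
package e2e

import (
	"reflect"
	"testing"
)

// TestSnapshotComparison mimics TestLeaderElection's check: a namespace whose
// pod-name snapshot is unchanged saw no evictions; a changed snapshot did.
func TestSnapshotComparison(t *testing.T) {
	before := []string{"pod-a", "pod-b"}
	afterUntouched := []string{"pod-a", "pod-b"}
	afterEvicted := []string{"pod-a", "pod-c"} // pod-b evicted, pod-c replaced it

	if !reflect.DeepEqual(before, afterUntouched) {
		t.Error("expected the untouched namespace to keep its pods")
	}
	if reflect.DeepEqual(before, afterEvicted) {
		t.Error("expected the evicted namespace to differ")
	}
}
```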
@@ -27,8 +27,6 @@ import (
 	"testing"
 	"time"

-	"sigs.k8s.io/yaml"
-
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	schedulingv1 "k8s.io/api/scheduling/v1"
@@ -44,6 +42,7 @@ import (
 	componentbaseconfig "k8s.io/component-base/config"
 	"k8s.io/klog/v2"
 	utilptr "k8s.io/utils/ptr"
+	"sigs.k8s.io/yaml"

 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/api"
@@ -63,7 +62,6 @@ import (
 	frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
-	"sigs.k8s.io/descheduler/test"
 )

 func isClientRateLimiterError(err error) bool {
@@ -195,67 +193,6 @@ func printPodLogs(ctx context.Context, t *testing.T, kubeClient clientset.Interf
 	}
 }

-func waitForDeschedulerPodRunning(t *testing.T, ctx context.Context, kubeClient clientset.Interface, testName string) string {
-	deschedulerPodName := ""
-	if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
-		podList, err := kubeClient.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{
-			LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"app": "descheduler", "test": testName})).String(),
-		})
-		if err != nil {
-			t.Logf("Unable to list pods: %v", err)
-			if isClientRateLimiterError(err) {
-				return false, nil
-			}
-			return false, err
-		}
-
-		runningPods := []*v1.Pod{}
-		for _, item := range podList.Items {
-			if item.Status.Phase != v1.PodRunning {
-				continue
-			}
-			pod := item
-			runningPods = append(runningPods, &pod)
-		}
-
-		if len(runningPods) != 1 {
-			t.Logf("Expected a single running pod, got %v instead", len(runningPods))
-			return false, nil
-		}
-
-		deschedulerPodName = runningPods[0].Name
-		t.Logf("Found a descheduler pod running: %v", deschedulerPodName)
-		return true, nil
-	}); err != nil {
-		t.Fatalf("Error waiting for a running descheduler: %v", err)
-	}
-	return deschedulerPodName
-}
-
-func waitForDeschedulerPodAbsent(t *testing.T, ctx context.Context, kubeClient clientset.Interface, testName string) {
-	if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
-		podList, err := kubeClient.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{
-			LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"app": "descheduler", "test": testName})).String(),
-		})
-		if err != nil {
-			t.Logf("Unable to list pods: %v", err)
-			if isClientRateLimiterError(err) {
-				return false, nil
-			}
-			return false, err
-		}
-
-		if len(podList.Items) > 0 {
-			t.Logf("Found a descheduler pod. Waiting until it gets deleted")
-			return false, nil
-		}
-
-		return true, nil
-	}); err != nil {
-		t.Fatalf("Error waiting for a descheduler to disapear: %v", err)
-	}
-}
-
 func TestMain(m *testing.M) {
 	if os.Getenv("DESCHEDULER_IMAGE") == "" {
 		klog.Errorf("DESCHEDULER_IMAGE env is not set")
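Note: the two helpers deleted above, and the generic `waitForPodsRunning`/`waitForPodsToDisappear` that replace them later in this diff, all follow the same `wait.PollUntilContextTimeout` contract: the condition returns `(false, nil)` to poll again, `(true, nil)` to succeed, and a non-nil error to abort, with client-side throttling deliberately downgraded to "poll again". A compact self-contained sketch of that contract (`isRetryable` is a stand-in for the repo's `isClientRateLimiterError`):

```go
package e2e

import (
	"context"
	"errors"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

var errThrottled = errors.New("client rate limiter: would exceed deadline")

// isRetryable stands in for isClientRateLimiterError: throttling is transient,
// so it should trigger another poll instead of aborting the whole wait.
func isRetryable(err error) bool { return errors.Is(err, errThrottled) }

// pollUntil wraps the three-way contract used by every wait helper in this diff.
func pollUntil(ctx context.Context, attempt func() error) error {
	return wait.PollUntilContextTimeout(ctx, 1*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
		switch err := attempt(); {
		case err == nil:
			return true, nil // condition met: stop polling
		case isRetryable(err):
			return false, nil // transient: poll again
		default:
			return false, err // fatal: abort the wait with this error
		}
	})
}
```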
@@ -297,7 +234,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: labels,
 				},
-				Spec: test.MakePodSpec(priorityClassName, gracePeriod),
+				Spec: makePodSpec(priorityClassName, gracePeriod),
 			},
 		},
 	}
@@ -329,12 +266,83 @@ func DsByNameContainer(name, namespace string, labels map[string]string, gracePe
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: labels,
 				},
-				Spec: test.MakePodSpec("", gracePeriod),
+				Spec: makePodSpec("", gracePeriod),
 			},
 		},
 	}
 }

+func buildTestDeployment(name, namespace string, replicas int32, testLabel map[string]string, apply func(deployment *appsv1.Deployment)) *appsv1.Deployment {
+	deployment := &appsv1.Deployment{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Deployment",
+			APIVersion: "apps/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels:    testLabel,
+		},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: utilptr.To[int32](replicas),
+			Selector: &metav1.LabelSelector{
+				MatchLabels: testLabel,
+			},
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: testLabel,
+				},
+				Spec: makePodSpec("", utilptr.To[int64](0)),
+			},
+		},
+	}
+
+	if apply != nil {
+		apply(deployment)
+	}
+
+	return deployment
+}
+
+func makePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
+	return v1.PodSpec{
+		SecurityContext: &v1.PodSecurityContext{
+			RunAsNonRoot: utilptr.To(true),
+			RunAsUser:    utilptr.To[int64](1000),
+			RunAsGroup:   utilptr.To[int64](1000),
+			SeccompProfile: &v1.SeccompProfile{
+				Type: v1.SeccompProfileTypeRuntimeDefault,
+			},
+		},
+		Containers: []v1.Container{{
+			Name:            "pause",
+			ImagePullPolicy: "IfNotPresent",
+			Image:           "registry.k8s.io/pause",
+			Ports:           []v1.ContainerPort{{ContainerPort: 80}},
+			Resources: v1.ResourceRequirements{
+				Limits: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("200Mi"),
+				},
+				Requests: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
+				},
+			},
+			SecurityContext: &v1.SecurityContext{
+				AllowPrivilegeEscalation: utilptr.To(false),
+				Capabilities: &v1.Capabilities{
+					Drop: []v1.Capability{
+						"ALL",
+					},
+				},
+			},
+		}},
+		PriorityClassName:             priorityClassName,
+		TerminationGracePeriodSeconds: gracePeriod,
+	}
+}
+
 func initializeClient(ctx context.Context, t *testing.T) (clientset.Interface, informers.SharedInformerFactory, listersv1.NodeLister, podutil.GetPodsAssignedToNodeFunc) {
 	clientSet, err := client.CreateClient(componentbaseconfig.ClientConnectionConfiguration{Kubeconfig: os.Getenv("KUBECONFIG")}, "")
 	if err != nil {
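Note: with the builders now private to the e2e package, call sites own the full label map (the shared `BuildTestDeployment` being removed at the end of this diff silently injected `"name": name`) and customize the object through the `apply` callback. A sketch of a typical call, assuming it compiles in the same package as the helpers added above:

```go
package e2e

import (
	appsv1 "k8s.io/api/apps/v1"
)

// newZonePinnedDeployment shows the new call shape: explicit labels, plus an
// apply callback for per-test tweaks such as pinning pods to one zone.
func newZonePinnedDeployment(namespace, zone string) *appsv1.Deployment {
	testLabels := map[string]string{"app": "e2e-demo", "name": "e2e-demo"} // caller adds "name" itself now
	return buildTestDeployment("e2e-demo", namespace, 3, testLabels, func(d *appsv1.Deployment) {
		d.Spec.Template.Spec.NodeSelector = map[string]string{
			"topology.kubernetes.io/zone": zone,
		}
	})
}
```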
@@ -1705,6 +1713,10 @@ func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.In
 	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
 		podItem, err := clientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 		if err != nil {
+			t.Logf("Unable to list pods: %v", err)
+			if isClientRateLimiterError(err) {
+				return false, nil
+			}
 			return false, err
 		}

@@ -1719,28 +1731,62 @@ func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.In
 	}
 }

-func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) {
-	if err := wait.PollUntilContextTimeout(ctx, 10*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
+func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desiredRunningPodNum int, namespace string) []*v1.Pod {
+	desiredRunningPods := make([]*v1.Pod, desiredRunningPodNum)
+	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
 		podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
 			LabelSelector: labels.SelectorFromSet(labelMap).String(),
 		})
 		if err != nil {
-			return false, err
-		}
-		if len(podList.Items) != desireRunningPodNum {
-			t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(podList.Items))
-			return false, nil
-		}
-		for _, pod := range podList.Items {
-			if pod.Status.Phase != v1.PodRunning {
-				t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
+			t.Logf("Unable to list pods: %v", err)
+			if isClientRateLimiterError(err) {
 				return false, nil
 			}
+			return false, err
 		}
+		runningPods := []*v1.Pod{}
+		for _, item := range podList.Items {
+			if item.Status.Phase != v1.PodRunning {
+				continue
+			}
+			pod := item
+			runningPods = append(runningPods, &pod)
+		}
+
+		if len(runningPods) != desiredRunningPodNum {
+			t.Logf("Waiting for %v pods to be running, got %v instead", desiredRunningPodNum, len(runningPods))
+			return false, nil
+		}
+		desiredRunningPods = runningPods
+
 		return true, nil
 	}); err != nil {
 		t.Fatalf("Error waiting for pods running: %v", err)
 	}
+	return desiredRunningPods
+}
+
+func waitForPodsToDisappear(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, namespace string) {
+	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
+		podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+			LabelSelector: labels.SelectorFromSet(labelMap).String(),
+		})
+		if err != nil {
+			t.Logf("Unable to list pods: %v", err)
+			if isClientRateLimiterError(err) {
+				return false, nil
+			}
+			return false, err
+		}
+
+		if len(podList.Items) > 0 {
+			t.Logf("Found a existing pod. Waiting until it gets deleted")
+			return false, nil
+		}
+		return true, nil
+	}); err != nil {
+		t.Fatalf("Error waiting for pods to disappear: %v", err)
+	}
 }

 func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
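Note: one easily missed line in the new `waitForPodsRunning` (and in the equivalent loop removed above) is `pod := item` before `append(runningPods, &pod)`. Under the pre-Go-1.22 loop semantics this code targets, the range variable is reused across iterations, so appending `&item` directly would leave every slice element pointing at the last pod seen. A standalone demonstration of the copy pattern:

```go
package main

import "fmt"

type pod struct{ Name string }

func main() {
	items := []pod{{"a"}, {"b"}, {"c"}}

	pointers := []*pod{}
	for _, item := range items {
		pod := item // fresh copy per iteration; &item would alias the shared loop variable pre-Go 1.22
		pointers = append(pointers, &pod)
	}

	for _, p := range pointers {
		fmt.Println(p.Name) // prints a, b, c; without the copy, pre-1.22 prints c, c, c
	}
}
```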
@@ -1756,8 +1802,8 @@ func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
 	return allNodes, workerNodes
 }

-func getCurrentPodNames(t *testing.T, ctx context.Context, kubeClient clientset.Interface, namespace string) []string {
-	podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
+func getCurrentPodNames(ctx context.Context, clientSet clientset.Interface, namespace string, t *testing.T) []string {
+	podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
 	if err != nil {
 		t.Logf("Unable to list pods: %v", err)
 		return nil
@@ -190,7 +190,7 @@ func TestTooManyRestarts(t *testing.T) {
 			rs.Client = clientSet
 			rs.EventClient = clientSet

-			preRunNames := sets.NewString(getCurrentPodNames(t, ctx, clientSet, testNamespace.Name)...)
+			preRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
 			// Deploy the descheduler with the configured policy
 			deschedulerPolicyConfigMapObj, err := deschedulerPolicyConfigMap(tc.policy)
 			if err != nil {
@@ -228,15 +228,18 @@ func TestTooManyRestarts(t *testing.T) {
 				if err != nil {
 					t.Fatalf("Unable to delete %q deployment: %v", deschedulerDeploymentObj.Name, err)
 				}
-				waitForDeschedulerPodAbsent(t, ctx, clientSet, testNamespace.Name)
+				waitForPodsToDisappear(ctx, t, clientSet, deschedulerDeploymentObj.Labels, deschedulerDeploymentObj.Namespace)
 			}()

 			t.Logf("Waiting for the descheduler pod running")
-			deschedulerPodName = waitForDeschedulerPodRunning(t, ctx, clientSet, testNamespace.Name)
+			deschedulerPods := waitForPodsRunning(ctx, t, clientSet, deschedulerDeploymentObj.Labels, 1, deschedulerDeploymentObj.Namespace)
+			if len(deschedulerPods) != 0 {
+				deschedulerPodName = deschedulerPods[0].Name
+			}

 			// Run RemovePodsHavingTooManyRestarts strategy
 			if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 20*time.Second, true, func(ctx context.Context) (bool, error) {
-				currentRunNames := sets.NewString(getCurrentPodNames(t, ctx, clientSet, testNamespace.Name)...)
+				currentRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
 				actualEvictedPod := preRunNames.Difference(currentRunNames)
 				actualEvictedPodCount := uint(actualEvictedPod.Len())
 				t.Logf("preRunNames: %v, currentRunNames: %v, actualEvictedPodCount: %v\n", preRunNames.List(), currentRunNames.List(), actualEvictedPodCount)
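Note: the eviction assertion above is plain set arithmetic: names present before the descheduler ran minus names present now are the evicted pods (replacement pods get fresh generated names, so they cannot mask an eviction). A minimal sketch with hypothetical pod names, using the same `sets.NewString` string-set API as the test:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	preRun := sets.NewString("restarts-1", "restarts-2", "restarts-3")
	// restarts-2 was evicted; restarts-4 is its replacement with a new name.
	current := sets.NewString("restarts-1", "restarts-3", "restarts-4")

	evicted := preRun.Difference(current)
	fmt.Println(evicted.List(), evicted.Len()) // [restarts-2] 1
}
```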
@@ -20,7 +20,6 @@ import (
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
 	frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
-	"sigs.k8s.io/descheduler/test"
 )

 const zoneTopologyKey string = "topology.kubernetes.io/zone"
@@ -126,26 +125,36 @@ func TestTopologySpreadConstraint(t *testing.T) {
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
 			t.Logf("Creating Deployment %s with %d replicas", name, tc.replicaCount)
-			deployment := test.BuildTestDeployment(name, testNamespace.Name, int32(tc.replicaCount), tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
+			deployLabels := tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels
+			deployLabels["name"] = name
+			deployment := buildTestDeployment(name, testNamespace.Name, int32(tc.replicaCount), deployLabels, func(d *appsv1.Deployment) {
 				d.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{tc.topologySpreadConstraint}
 			})
 			if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
 				t.Fatalf("Error creating Deployment %s %v", name, err)
 			}
-			defer test.DeleteDeployment(ctx, t, clientSet, deployment)
-			test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
+			defer func() {
+				clientSet.AppsV1().Deployments(deployment.Namespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{})
+				waitForPodsToDisappear(ctx, t, clientSet, deployment.Labels, deployment.Namespace)
+			}()
+			waitForPodsRunning(ctx, t, clientSet, deployment.Labels, tc.replicaCount, deployment.Namespace)

 			// Create a "Violator" Deployment that has the same label and is forced to be on the same node using a nodeSelector
 			violatorDeploymentName := name + "-violator"
 			violatorCount := tc.topologySpreadConstraint.MaxSkew + 1
-			violatorDeployment := test.BuildTestDeployment(violatorDeploymentName, testNamespace.Name, violatorCount, tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
+			violatorDeployLabels := tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels
+			violatorDeployLabels["name"] = violatorDeploymentName
+			violatorDeployment := buildTestDeployment(violatorDeploymentName, testNamespace.Name, violatorCount, violatorDeployLabels, func(d *appsv1.Deployment) {
 				d.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
 			})
 			if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, violatorDeployment, metav1.CreateOptions{}); err != nil {
 				t.Fatalf("Error creating Deployment %s: %v", violatorDeploymentName, err)
 			}
-			defer test.DeleteDeployment(ctx, t, clientSet, violatorDeployment)
-			test.WaitForDeploymentPodsRunning(ctx, t, clientSet, violatorDeployment)
+			defer func() {
+				clientSet.AppsV1().Deployments(violatorDeployment.Namespace).Delete(ctx, violatorDeployment.Name, metav1.DeleteOptions{})
+				waitForPodsToDisappear(ctx, t, clientSet, violatorDeployment.Labels, violatorDeployment.Namespace)
+			}()
+			waitForPodsRunning(ctx, t, clientSet, violatorDeployment.Labels, int(violatorCount), violatorDeployment.Namespace)

 			evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
 			if err != nil || len(evictionPolicyGroupVersion) == 0 {
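Note: both rewritten call sites copy `MatchLabels` through `LabelSelector.DeepCopy()` before adding the `name` key. That is the point of the change: the map belongs to the shared test-case struct, and mutating it in place (as the removed shared builder did) would leak `name` into the topology-spread selector used by later assertions. A minimal illustration of the aliasing hazard:

```go
package main

import "fmt"

func main() {
	selector := map[string]string{"test": "topology"}

	aliased := selector // no copy: both names refer to one map
	aliased["name"] = "case-a"

	copied := map[string]string{} // what DeepCopy achieves for the selector
	for k, v := range selector {
		copied[k] = v
	}
	copied["name"] = "case-b"

	fmt.Println(selector["name"]) // "case-a": the alias polluted the shared map
	fmt.Println(copied["name"])   // "case-b": the copy kept the change local
}
```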
@@ -195,7 +204,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}

 			// Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
-			test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
+			waitForPodsRunning(ctx, t, clientSet, deployment.Labels, tc.replicaCount, deployment.Namespace)

 			listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(tc.topologySpreadConstraint.LabelSelector.MatchLabels).String()}
 			pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, listOptions)
@@ -6,6 +6,7 @@ import (
 	"testing"

+	componentbaseconfig "k8s.io/component-base/config"

 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler"
@@ -34,42 +34,6 @@ import (
 	utilptr "k8s.io/utils/ptr"
 )

-func BuildTestDeployment(name, namespace string, replicas int32, labels map[string]string, apply func(deployment *appsv1.Deployment)) *appsv1.Deployment {
-	// Add "name": name to the labels, overwriting if it exists.
-	labels["name"] = name
-
-	deployment := &appsv1.Deployment{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Deployment",
-			APIVersion: "apps/v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: namespace,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: utilptr.To[int32](replicas),
-			Selector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{
-					"name": name,
-				},
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: labels,
-				},
-				Spec: MakePodSpec("", utilptr.To[int64](0)),
-			},
-		},
-	}
-
-	if apply != nil {
-		apply(deployment)
-	}
-
-	return deployment
-}
-
 // BuildTestPod creates a test pod with given parameters.
 func BuildTestPod(name string, cpu, memory int64, nodeName string, apply func(*v1.Pod)) *v1.Pod {
 	pod := &v1.Pod{
@@ -171,45 +135,6 @@ func BuildTestNode(name string, millicpu, mem, pods int64, apply func(*v1.Node))
 	return node
 }

-func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
-	return v1.PodSpec{
-		SecurityContext: &v1.PodSecurityContext{
-			RunAsNonRoot: utilptr.To(true),
-			RunAsUser:    utilptr.To[int64](1000),
-			RunAsGroup:   utilptr.To[int64](1000),
-			SeccompProfile: &v1.SeccompProfile{
-				Type: v1.SeccompProfileTypeRuntimeDefault,
-			},
-		},
-		Containers: []v1.Container{{
-			Name:            "pause",
-			ImagePullPolicy: "Never",
-			Image:           "registry.k8s.io/pause",
-			Ports:           []v1.ContainerPort{{ContainerPort: 80}},
-			Resources: v1.ResourceRequirements{
-				Limits: v1.ResourceList{
-					v1.ResourceCPU:    resource.MustParse("100m"),
-					v1.ResourceMemory: resource.MustParse("200Mi"),
-				},
-				Requests: v1.ResourceList{
-					v1.ResourceCPU:    resource.MustParse("100m"),
-					v1.ResourceMemory: resource.MustParse("100Mi"),
-				},
-			},
-			SecurityContext: &v1.SecurityContext{
-				AllowPrivilegeEscalation: utilptr.To(false),
-				Capabilities: &v1.Capabilities{
-					Drop: []v1.Capability{
-						"ALL",
-					},
-				},
-			},
-		}},
-		PriorityClassName:             priorityClassName,
-		TerminationGracePeriodSeconds: gracePeriod,
-	}
-}
-
 // MakeBestEffortPod makes the given pod a BestEffort pod
 func MakeBestEffortPod(pod *v1.Pod) {
 	pod.Spec.Containers[0].Resources.Requests = nil
@@ -316,30 +241,6 @@ func DeleteDeployment(ctx context.Context, t *testing.T, clientSet clientset.Int
 	}
 }

-func WaitForDeploymentPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, deployment *appsv1.Deployment) {
-	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(c context.Context) (bool, error) {
-		podList, err := clientSet.CoreV1().Pods(deployment.Namespace).List(ctx, metav1.ListOptions{
-			LabelSelector: labels.SelectorFromSet(deployment.Spec.Template.ObjectMeta.Labels).String(),
-		})
-		if err != nil {
-			return false, err
-		}
-		if len(podList.Items) != int(*deployment.Spec.Replicas) {
-			t.Logf("Waiting for %v pods to be created, got %v instead", *deployment.Spec.Replicas, len(podList.Items))
-			return false, nil
-		}
-		for _, pod := range podList.Items {
-			if pod.Status.Phase != v1.PodRunning {
-				t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
-				return false, nil
-			}
-		}
-		return true, nil
-	}); err != nil {
-		t.Fatalf("Error waiting for pods running: %v", err)
-	}
-}
-
 func SetPodAntiAffinity(inputPod *v1.Pod, labelKey, labelValue string) {
 	inputPod.Spec.Affinity = &v1.Affinity{
 		PodAntiAffinity: &v1.PodAntiAffinity{