
topologyspreadconstraint: refactor to match scheduler's struct and initialization

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
Author: Amir Alavi
Date: 2023-09-04 14:40:14 -04:00
Parent: 704a82bcf4
Commit: bf2bd73f64

6 changed files with 293 additions and 158 deletions


@@ -17,6 +17,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
+	"sigs.k8s.io/descheduler/test"
 )
 
 var oneHourPodLifetimeSeconds uint = 3600
@@ -131,7 +132,7 @@ func TestFailedPods(t *testing.T) {
 }
 
 func initFailedJob(name, namespace string) *batchv1.Job {
-	podSpec := MakePodSpec("", nil)
+	podSpec := test.MakePodSpec("", nil)
 	podSpec.Containers[0].Command = []string{"/bin/false"}
 	podSpec.RestartPolicy = v1.RestartPolicyNever
 	labelsSet := labels.Set{"test": name, "name": name}


@@ -55,47 +55,9 @@ import (
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
+	"sigs.k8s.io/descheduler/test"
 )
 
-func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
-	return v1.PodSpec{
-		SecurityContext: &v1.PodSecurityContext{
-			RunAsNonRoot: utilpointer.Bool(true),
-			RunAsUser:    utilpointer.Int64(1000),
-			RunAsGroup:   utilpointer.Int64(1000),
-			SeccompProfile: &v1.SeccompProfile{
-				Type: v1.SeccompProfileTypeRuntimeDefault,
-			},
-		},
-		Containers: []v1.Container{{
-			Name:            "pause",
-			ImagePullPolicy: "Never",
-			Image:           "registry.k8s.io/pause",
-			Ports:           []v1.ContainerPort{{ContainerPort: 80}},
-			Resources: v1.ResourceRequirements{
-				Limits: v1.ResourceList{
-					v1.ResourceCPU:    resource.MustParse("100m"),
-					v1.ResourceMemory: resource.MustParse("200Mi"),
-				},
-				Requests: v1.ResourceList{
-					v1.ResourceCPU:    resource.MustParse("100m"),
-					v1.ResourceMemory: resource.MustParse("100Mi"),
-				},
-			},
-			SecurityContext: &v1.SecurityContext{
-				AllowPrivilegeEscalation: utilpointer.Bool(false),
-				Capabilities: &v1.Capabilities{
-					Drop: []v1.Capability{
-						"ALL",
-					},
-				},
-			},
-		}},
-		PriorityClassName:             priorityClassName,
-		TerminationGracePeriodSeconds: gracePeriod,
-	}
-}
-
 // RcByNameContainer returns a ReplicationController with specified name and container
 func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64, priorityClassName string) *v1.ReplicationController {
 	// Add "name": name to the labels, overwriting if it exists.
@@ -121,7 +83,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: labels,
 				},
-				Spec: MakePodSpec(priorityClassName, gracePeriod),
+				Spec: test.MakePodSpec(priorityClassName, gracePeriod),
 			},
 		},
 	}
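
Note: the MakePodSpec helper deleted above is presumably re-homed in the shared sigs.k8s.io/descheduler/test package (one of the changed files not shown in this excerpt), which is why both e2e suites now call test.MakePodSpec. A minimal sketch of the assumed relocated declaration follows; the placement and the trimmed body are illustrative only, and the real helper is expected to carry over the full hardened PodSpec removed in the hunk above.

// Hypothetical sketch only: assumed to live somewhere under the shared test package.
package test

import v1 "k8s.io/api/core/v1"

// MakePodSpec keeps the signature the call sites above rely on. In this sketch the
// returned spec is trimmed to a bare pause container; the actual helper is assumed
// to reuse the full spec (security context, resources, ports) deleted from the e2e suite.
func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
	return v1.PodSpec{
		Containers: []v1.Container{{
			Name:  "pause",
			Image: "registry.k8s.io/pause",
		}},
		PriorityClassName:             priorityClassName,
		TerminationGracePeriodSeconds: gracePeriod,
	}
}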


@@ -14,6 +14,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
+	"sigs.k8s.io/descheduler/test"
 )
 
 const zoneTopologyKey string = "topology.kubernetes.io/zone"
@@ -39,7 +40,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 		replicaCount             int
 		topologySpreadConstraint v1.TopologySpreadConstraint
 	}{
-		"test-rc-topology-spread-hard-constraint": {
+		"test-topology-spread-hard-constraint": {
 			expectedEvictedCount: 1,
 			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
@@ -53,7 +54,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				WhenUnsatisfiable: v1.DoNotSchedule,
 			},
 		},
-		"test-rc-topology-spread-soft-constraint": {
+		"test-topology-spread-soft-constraint": {
 			expectedEvictedCount: 1,
 			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
@@ -67,7 +68,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				WhenUnsatisfiable: v1.ScheduleAnyway,
 			},
 		},
-		"test-rc-node-taints-policy-honor": {
+		"test-node-taints-policy-honor": {
 			expectedEvictedCount: 1,
 			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
@@ -82,7 +83,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				WhenUnsatisfiable: v1.DoNotSchedule,
 			},
 		},
-		"test-rc-node-affinity-policy-ignore": {
+		"test-node-affinity-policy-ignore": {
 			expectedEvictedCount: 1,
 			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
@@ -97,7 +98,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				WhenUnsatisfiable: v1.DoNotSchedule,
 			},
 		},
-		"test-rc-match-label-keys": {
+		"test-match-label-keys": {
 			expectedEvictedCount: 0,
 			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
@@ -115,26 +116,27 @@ func TestTopologySpreadConstraint(t *testing.T) {
 	}
 
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
-			t.Logf("Creating RC %s with %d replicas", name, tc.replicaCount)
-			rc := RcByNameContainer(name, testNamespace.Name, int32(tc.replicaCount), tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, nil, "")
-			rc.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{tc.topologySpreadConstraint}
-			if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
-				t.Fatalf("Error creating RC %s %v", name, err)
+			t.Logf("Creating Deployment %s with %d replicas", name, tc.replicaCount)
+			deployment := test.BuildTestDeployment(name, testNamespace.Name, int32(tc.replicaCount), tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
+				d.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{tc.topologySpreadConstraint}
+			})
+			if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
+				t.Fatalf("Error creating Deployment %s %v", name, err)
 			}
-			defer deleteRC(ctx, t, clientSet, rc)
-			waitForRCPodsRunning(ctx, t, clientSet, rc)
+			defer test.DeleteDeployment(ctx, t, clientSet, deployment)
+			test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
 
-			// Create a "Violator" RC that has the same label and is forced to be on the same node using a nodeSelector
-			violatorRcName := name + "-violator"
+			// Create a "Violator" Deployment that has the same label and is forced to be on the same node using a nodeSelector
+			violatorDeploymentName := name + "-violator"
 			violatorCount := tc.topologySpreadConstraint.MaxSkew + 1
-			violatorRc := RcByNameContainer(violatorRcName, testNamespace.Name, violatorCount, tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, nil, "")
-			violatorRc.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
-			rc.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{tc.topologySpreadConstraint}
-			if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, violatorRc, metav1.CreateOptions{}); err != nil {
-				t.Fatalf("Error creating RC %s: %v", violatorRcName, err)
+			violatorDeployment := test.BuildTestDeployment(violatorDeploymentName, testNamespace.Name, violatorCount, tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
+				d.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
+			})
+			if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, violatorDeployment, metav1.CreateOptions{}); err != nil {
+				t.Fatalf("Error creating Deployment %s: %v", violatorDeploymentName, err)
 			}
-			defer deleteRC(ctx, t, clientSet, violatorRc)
-			waitForRCPodsRunning(ctx, t, clientSet, violatorRc)
+			defer test.DeleteDeployment(ctx, t, clientSet, violatorDeployment)
+			test.WaitForDeploymentPodsRunning(ctx, t, clientSet, violatorDeployment)
 
 			podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
@@ -177,7 +179,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
 
 			t.Logf("Wait for terminating pods of %s to disappear", name)
-			waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
+			waitForTerminatingPodsToDisappear(ctx, t, clientSet, deployment.Namespace)
 
 			if totalEvicted := podEvictor.TotalEvicted(); totalEvicted == tc.expectedEvictedCount {
 				t.Logf("Total of %d Pods were evicted for %s", totalEvicted, name)
@@ -190,7 +192,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}
 
 			// Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
-			waitForRCPodsRunning(ctx, t, clientSet, rc)
+			test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
 
 			listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(tc.topologySpreadConstraint.LabelSelector.MatchLabels).String()}
 			pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, listOptions)
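
The rewritten test also relies on deployment helpers from the shared test package whose definitions are outside this excerpt: test.BuildTestDeployment, test.DeleteDeployment, and test.WaitForDeploymentPodsRunning. Below is a sketch of what the call sites above imply about their signatures; everything here is inferred from usage rather than copied from the package, so treat the parameter names, the builder's defaults, and the stubbed bodies as assumptions.

// Hypothetical sketch of the shared deployment helpers, inferred from the call sites above.
package test

import (
	"context"
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// BuildTestDeployment is assumed to build a minimal Deployment with the given name,
// namespace, replica count and pod labels, then hand it to apply so the caller can set
// extras such as TopologySpreadConstraints or a NodeSelector before creating it.
func BuildTestDeployment(name, namespace string, replicas int32, podLabels map[string]string, apply func(*appsv1.Deployment)) *appsv1.Deployment {
	d := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, Labels: podLabels},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: podLabels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: podLabels},
				Spec:       MakePodSpec("", nil), // reuse the relocated pod-spec helper
			},
		},
	}
	if apply != nil {
		apply(d)
	}
	return d
}

// DeleteDeployment and WaitForDeploymentPodsRunning are assumed to mirror the old
// deleteRC / waitForRCPodsRunning helpers for Deployments; bodies are omitted in this sketch.
func DeleteDeployment(ctx context.Context, t *testing.T, client kubernetes.Interface, d *appsv1.Deployment) {
	// sketch: delete the Deployment and wait for its pods to terminate
}

func WaitForDeploymentPodsRunning(ctx context.Context, t *testing.T, client kubernetes.Interface, d *appsv1.Deployment) {
	// sketch: poll until all of the Deployment's pods report Running
}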