From 704a82bcf45adefa5e8277b4f56ec8dc9e2e1dfd Mon Sep 17 00:00:00 2001
From: Amir Alavi
Date: Sun, 27 Aug 2023 21:44:43 -0400
Subject: [PATCH] topologyspreadconstraint: support `matchLabelKeys`

Signed-off-by: Amir Alavi
---
 hack/kind_config.yaml                         |  4 ++
 .../topologyspreadconstraint.go               | 28 +++++++--
 .../topologyspreadconstraint_test.go          | 62 +++++++++++++++++++
 test/e2e/e2e_topologyspreadconstraint_test.go | 37 +++++++++--
 4 files changed, 119 insertions(+), 12 deletions(-)

diff --git a/hack/kind_config.yaml b/hack/kind_config.yaml
index c4057022b..e366c168e 100644
--- a/hack/kind_config.yaml
+++ b/hack/kind_config.yaml
@@ -1,5 +1,9 @@
 kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
+featureGates:
+  # beta as of 1.27, but we currently run e2e on 1.26;
+  # this flag should be removed as part of the Descheduler 0.29 release
+  MatchLabelKeysInPodTopologySpread: true
 nodes:
 - role: control-plane
 - role: worker
diff --git a/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint.go b/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint.go
index ead0e6800..8eed61828 100644
--- a/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint.go
+++ b/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint.go
@@ -147,12 +147,9 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
 			if !allowedConstraints.Has(constraint.WhenUnsatisfiable) {
 				continue
 			}
-			requiredSchedulingTerm := nodeaffinity.GetRequiredNodeAffinity(pod)
-			namespaceTopologySpreadConstraint := topologyConstraintSet{
-				constraint:      constraint,
-				podNodeAffinity: requiredSchedulingTerm,
-				podTolerations:  pod.Spec.Tolerations,
-			}
+
+			namespaceTopologySpreadConstraint := newTopologyConstraintSet(constraint, pod)
+
 			// Need to check v1.TopologySpreadConstraint deepEquality because
 			// v1.TopologySpreadConstraint has pointer fields
 			// and we don't need to go over duplicated constraints later on
@@ -496,3 +493,22 @@ func matchNodeInclusionPolicies(tsc *v1.TopologySpreadConstraint, tolerations []
 	}
 	return true
 }
+
+func newTopologyConstraintSet(constraint v1.TopologySpreadConstraint, pod *v1.Pod) topologyConstraintSet {
+	if pod.Labels != nil && len(constraint.MatchLabelKeys) > 0 {
+		if constraint.LabelSelector == nil {
+			constraint.LabelSelector = &metav1.LabelSelector{}
+		}
+
+		for _, labelKey := range constraint.MatchLabelKeys {
+			metav1.AddLabelToSelector(constraint.LabelSelector, labelKey, pod.Labels[labelKey])
+		}
+	}
+
+	requiredSchedulingTerm := nodeaffinity.GetRequiredNodeAffinity(pod)
+	return topologyConstraintSet{
+		constraint:      constraint,
+		podNodeAffinity: requiredSchedulingTerm,
+		podTolerations:  pod.Spec.Tolerations,
+	}
+}
diff --git a/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint_test.go b/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint_test.go
index ecceb39d8..e97485cef 100644
--- a/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint_test.go
+++ b/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint_test.go
@@ -6,6 +6,7 @@ import (
 	"sort"
 	"testing"
 
+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1064,6 +1065,55 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			namespaces:           []string{"ns1"},
 			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
 		},
+		{
+			name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod given matchLabelKeys on same replicaset",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count:       1,
+					node:        "n1",
+					labels:      map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
+					constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
+				},
+				{
+					count:       1,
+					node:        "n1",
+					labels:      map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
+					constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
+				},
+			}),
+			expectedEvictedCount: 1,
+			expectedEvictedPods:  []string{"pod-1"},
+			namespaces:           []string{"ns1"},
+			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
+		},
+		{
+			name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods given matchLabelKeys on two different replicasets",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count:       1,
+					node:        "n1",
+					labels:      map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
+					constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
+				},
+				{
+					count:       1,
+					node:        "n1",
+					labels:      map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "bar"},
+					constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
+				},
+			}),
+			expectedEvictedCount: 0,
+			namespaces:           []string{"ns1"},
+			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
+		},
 		{
 			name: "2 domains, sizes [4,2], maxSkew=1, 2 pods in termination; nothing should be moved",
 			nodes: []*v1.Node{
@@ -1543,6 +1593,18 @@ func getDefaultNodeTopologyConstraints(maxSkew int32) []v1.TopologySpreadConstra
 	}
 }
 
+func getDefaultTopologyConstraintsWithPodTemplateHashMatch(maxSkew int32) []v1.TopologySpreadConstraint {
+	return []v1.TopologySpreadConstraint{
+		{
+			MaxSkew:           maxSkew,
+			TopologyKey:       "zone",
+			WhenUnsatisfiable: v1.DoNotSchedule,
+			LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+			MatchLabelKeys:    []string{appsv1.DefaultDeploymentUniqueLabelKey},
+		},
+	}
+}
+
 func TestCheckIdenticalConstraints(t *testing.T) {
 	newConstraintSame := v1.TopologySpreadConstraint{
 		MaxSkew: 2,
diff --git a/test/e2e/e2e_topologyspreadconstraint_test.go b/test/e2e/e2e_topologyspreadconstraint_test.go
index 8890884c8..48ebaab27 100644
--- a/test/e2e/e2e_topologyspreadconstraint_test.go
+++ b/test/e2e/e2e_topologyspreadconstraint_test.go
@@ -6,6 +6,7 @@ import (
 	"strings"
 	"testing"
 
+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -34,11 +35,13 @@ func TestTopologySpreadConstraint(t *testing.T) {
 	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
 
 	testCases := map[string]struct {
+		expectedEvictedCount     uint
 		replicaCount             int
 		topologySpreadConstraint v1.TopologySpreadConstraint
 	}{
 		"test-rc-topology-spread-hard-constraint": {
-			replicaCount: 4,
+			expectedEvictedCount: 1,
+			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
 				LabelSelector: &metav1.LabelSelector{
 					MatchLabels: map[string]string{
@@ -51,7 +54,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			},
 		},
 		"test-rc-topology-spread-soft-constraint": {
-			replicaCount: 4,
+			expectedEvictedCount: 1,
+			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
 				LabelSelector: &metav1.LabelSelector{
 					MatchLabels: map[string]string{
@@ -64,7 +68,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			},
 		},
 		"test-rc-node-taints-policy-honor": {
-			replicaCount: 4,
+			expectedEvictedCount: 1,
+			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
 				LabelSelector: &metav1.LabelSelector{
 					MatchLabels: map[string]string{
@@ -78,7 +83,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			},
 		},
 		"test-rc-node-affinity-policy-ignore": {
-			replicaCount: 4,
+			expectedEvictedCount: 1,
+			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
 				LabelSelector: &metav1.LabelSelector{
 					MatchLabels: map[string]string{
@@ -91,6 +97,21 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				WhenUnsatisfiable: v1.DoNotSchedule,
 			},
 		},
+		"test-rc-match-label-keys": {
+			expectedEvictedCount: 0,
+			replicaCount:         4,
+			topologySpreadConstraint: v1.TopologySpreadConstraint{
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"test": "match-label-keys",
+					},
+				},
+				MatchLabelKeys:    []string{appsv1.DefaultDeploymentUniqueLabelKey},
+				MaxSkew:           1,
+				TopologyKey:       zoneTopologyKey,
+				WhenUnsatisfiable: v1.DoNotSchedule,
+			},
+		},
 	}
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
@@ -158,10 +179,14 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			t.Logf("Wait for terminating pods of %s to disappear", name)
 			waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
 
-			if totalEvicted := podEvictor.TotalEvicted(); totalEvicted > 0 {
+			if totalEvicted := podEvictor.TotalEvicted(); totalEvicted == tc.expectedEvictedCount {
 				t.Logf("Total of %d Pods were evicted for %s", totalEvicted, name)
 			} else {
-				t.Fatalf("Pods were not evicted for %s TopologySpreadConstraint", name)
+				t.Fatalf("Expected %d evictions but got %d for %s TopologySpreadConstraint", tc.expectedEvictedCount, totalEvicted, name)
+			}
+
+			if tc.expectedEvictedCount == 0 {
+				return
 			}
 
 			// Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
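
How the new behavior works: for every key listed in a constraint's `matchLabelKeys`, newTopologyConstraintSet looks the key up in the candidate pod's own labels and merges the resulting key/value pair into the constraint's labelSelector before pods are grouped into topology domains. For Deployment-managed pods, pod-template-hash (appsv1.DefaultDeploymentUniqueLabelKey) differs per ReplicaSet, so skew is computed per ReplicaSet rather than across the whole Deployment. The standalone sketch below mirrors that merging step; the pod, its labels, and the hash value are invented for illustration, and it calls only the apimachinery helper the patch itself uses (metav1.AddLabelToSelector) plus metav1.LabelSelectorAsSelector to show the merged result:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A pod as a Deployment would create it: pod-template-hash identifies
	// the ReplicaSet revision the pod belongs to (hash value is made up).
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{
				"foo":                                  "bar",
				appsv1.DefaultDeploymentUniqueLabelKey: "5d4f9ff8d9",
			},
		},
	}

	constraint := v1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "zone",
		WhenUnsatisfiable: v1.DoNotSchedule,
		LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
		MatchLabelKeys:    []string{appsv1.DefaultDeploymentUniqueLabelKey},
	}

	// The merging step mirrored from newTopologyConstraintSet: copy each
	// listed label's value from the pod into the constraint's selector.
	if pod.Labels != nil && len(constraint.MatchLabelKeys) > 0 {
		if constraint.LabelSelector == nil {
			constraint.LabelSelector = &metav1.LabelSelector{}
		}
		for _, key := range constraint.MatchLabelKeys {
			metav1.AddLabelToSelector(constraint.LabelSelector, key, pod.Labels[key])
		}
	}

	selector, err := metav1.LabelSelectorAsSelector(constraint.LabelSelector)
	if err != nil {
		panic(err)
	}
	// Prints "foo=bar,pod-template-hash=5d4f9ff8d9": pods from another
	// ReplicaSet (a different hash) no longer fall into this group.
	fmt.Println(selector.String())
}

This makes the two new unit tests concrete: with identical hashes the merged selector matches both pods, so the domain sizes are [2,0] and skew 2 violates maxSkew=1 (one eviction); with different hashes each merged selector matches only its own pod, sizes are [1,0], and skew 1 is within bounds (no eviction). The e2e case likewise expects zero evictions, presumably because ReplicationController pods carry no pod-template-hash label at all, so the merged selector matches no pods and the constraint is never violated.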