
topologyspreadconstraint: support matchLabelKeys

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
Authored by Amir Alavi on 2023-08-27 21:44:43 -04:00
parent a01322a5e5
commit 704a82bcf4
4 changed files with 119 additions and 12 deletions

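For context: matchLabelKeys lets a topology spread constraint take the pod's own values for the listed label keys into account when selecting the pods to count, so a Deployment spreads each ReplicaSet revision independently via its pod-template-hash label. A minimal sketch of such a constraint built in Go follows; the "app" label and the zone topology key are illustrative and not taken from this change.

// Sketch only: a spread constraint that uses matchLabelKeys so skew is
// computed per Deployment revision rather than across all revisions.
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	constraint := v1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: v1.DoNotSchedule,
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "demo"}, // illustrative label
		},
		// DefaultDeploymentUniqueLabelKey == "pod-template-hash", the label the
		// Deployment controller stamps on every ReplicaSet's pods.
		MatchLabelKeys: []string{appsv1.DefaultDeploymentUniqueLabelKey},
	}
	fmt.Printf("%+v\n", constraint)
}

The kind config below enables the corresponding feature gate so the e2e suite can exercise this on Kubernetes 1.26.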

@@ -1,5 +1,9 @@
 kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
+featureGates:
+  # beta as of 1.27 but we currently run e2e on 1.26
+  # this flag should be removed as part of Descheduler 0.29 release
+  MatchLabelKeysInPodTopologySpread: true
 nodes:
 - role: control-plane
 - role: worker


@@ -147,12 +147,9 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
 			if !allowedConstraints.Has(constraint.WhenUnsatisfiable) {
 				continue
 			}
-			requiredSchedulingTerm := nodeaffinity.GetRequiredNodeAffinity(pod)
-			namespaceTopologySpreadConstraint := topologyConstraintSet{
-				constraint:      constraint,
-				podNodeAffinity: requiredSchedulingTerm,
-				podTolerations:  pod.Spec.Tolerations,
-			}
+			namespaceTopologySpreadConstraint := newTopologyConstraintSet(constraint, pod)
 			// Need to check v1.TopologySpreadConstraint deepEquality because
 			// v1.TopologySpreadConstraint has pointer fields
 			// and we don't need to go over duplicated constraints later on
@@ -496,3 +493,22 @@ func matchNodeInclusionPolicies(tsc *v1.TopologySpreadConstraint, tolerations []
 	}
 	return true
 }
+
+func newTopologyConstraintSet(constraint v1.TopologySpreadConstraint, pod *v1.Pod) topologyConstraintSet {
+	if pod.Labels != nil && len(constraint.MatchLabelKeys) > 0 {
+		if constraint.LabelSelector == nil {
+			constraint.LabelSelector = &metav1.LabelSelector{}
+		}
+		for _, labelKey := range constraint.MatchLabelKeys {
+			metav1.AddLabelToSelector(constraint.LabelSelector, labelKey, pod.Labels[labelKey])
+		}
+	}
+
+	requiredSchedulingTerm := nodeaffinity.GetRequiredNodeAffinity(pod)
+	return topologyConstraintSet{
+		constraint:      constraint,
+		podNodeAffinity: requiredSchedulingTerm,
+		podTolerations:  pod.Spec.Tolerations,
+	}
+}

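The newTopologyConstraintSet helper added above folds every matchLabelKeys entry into the constraint's LabelSelector, using the owning pod's value for that key, before capturing node affinity and tolerations as before. A standalone sketch of that selector expansion, with a made-up pod-template-hash value:

// Sketch of the selector expansion performed by newTopologyConstraintSet
// (standalone example; the hash value is invented).
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	selector := &metav1.LabelSelector{
		MatchLabels: map[string]string{"foo": "bar"},
	}
	podLabels := map[string]string{"foo": "bar", "pod-template-hash": "5d8f7c9b6"}

	// For each key in matchLabelKeys, the pod's own value for that key is
	// appended to the selector, narrowing it to pods of the same revision.
	for _, key := range []string{"pod-template-hash"} {
		metav1.AddLabelToSelector(selector, key, podLabels[key])
	}

	fmt.Println(selector.MatchLabels)
	// map[foo:bar pod-template-hash:5d8f7c9b6]
}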

@@ -6,6 +6,7 @@ import (
"sort" "sort"
"testing" "testing"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1" policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1064,6 +1065,55 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			namespaces:           []string{"ns1"},
 			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
 		},
+		{
+			name: "2 domains, sizes [2,0], maxSkew=1, move 1 pods given matchLabelKeys on same replicaset",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count:       1,
+					node:        "n1",
+					labels:      map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
+					constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
+				},
+				{
+					count:       1,
+					node:        "n1",
+					labels:      map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
+					constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
+				},
+			}),
+			expectedEvictedCount: 1,
+			expectedEvictedPods:  []string{"pod-1"},
+			namespaces:           []string{"ns1"},
+			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
+		},
+		{
+			name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods given matchLabelKeys on two different replicasets",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count:       1,
+					node:        "n1",
+					labels:      map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
+					constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
+				},
+				{
+					count:       1,
+					node:        "n1",
+					labels:      map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "bar"},
+					constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
+				},
+			}),
+			expectedEvictedCount: 0,
+			namespaces:           []string{"ns1"},
+			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
+		},
 		{
 			name: "2 domains, sizes [4,2], maxSkew=1, 2 pods in termination; nothing should be moved",
 			nodes: []*v1.Node{
@@ -1543,6 +1593,18 @@ func getDefaultNodeTopologyConstraints(maxSkew int32) []v1.TopologySpreadConstra
 	}
 }
 
+func getDefaultTopologyConstraintsWithPodTemplateHashMatch(maxSkew int32) []v1.TopologySpreadConstraint {
+	return []v1.TopologySpreadConstraint{
+		{
+			MaxSkew:           maxSkew,
+			TopologyKey:       "zone",
+			WhenUnsatisfiable: v1.DoNotSchedule,
+			LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+			MatchLabelKeys:    []string{appsv1.DefaultDeploymentUniqueLabelKey},
+		},
+	}
+}
+
 func TestCheckIdenticalConstraints(t *testing.T) {
 	newConstraintSame := v1.TopologySpreadConstraint{
 		MaxSkew:           2,

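Both new table cases hinge on appsv1.DefaultDeploymentUniqueLabelKey, i.e. the "pod-template-hash" label the Deployment controller stamps on each ReplicaSet's pods. A standalone sketch (hash values invented) of why equal hashes fall into one spread group and trigger an eviction, while different hashes keep each ReplicaSet's spread separate:

// Standalone sketch of the expected evictions in the two cases above: after
// matchLabelKeys expansion, pods only count toward the same topology group
// when their pod-template-hash values are equal.
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	hashKey := appsv1.DefaultDeploymentUniqueLabelKey // "pod-template-hash"

	podA := labels.Set{"foo": "bar", hashKey: "foo"}
	podB := labels.Set{"foo": "bar", hashKey: "foo"} // same ReplicaSet as podA
	podC := labels.Set{"foo": "bar", hashKey: "bar"} // a different ReplicaSet

	// Selector expanded from podA's labels, as newTopologyConstraintSet would do.
	sel := labels.SelectorFromSet(labels.Set{"foo": "bar", hashKey: podA[hashKey]})

	fmt.Println(sel.Matches(podB)) // true  -> both pods form one group: skew 2 vs 0, evict 1
	fmt.Println(sel.Matches(podC)) // false -> each ReplicaSet spreads alone: skew 1 vs 0, evict 0
}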

@@ -6,6 +6,7 @@ import (
"strings" "strings"
"testing" "testing"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
@@ -34,11 +35,13 @@ func TestTopologySpreadConstraint(t *testing.T) {
 	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
 
 	testCases := map[string]struct {
+		expectedEvictedCount     uint
 		replicaCount             int
 		topologySpreadConstraint v1.TopologySpreadConstraint
 	}{
 		"test-rc-topology-spread-hard-constraint": {
-			replicaCount: 4,
+			expectedEvictedCount: 1,
+			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
 				LabelSelector: &metav1.LabelSelector{
 					MatchLabels: map[string]string{
@@ -51,7 +54,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			},
 		},
 		"test-rc-topology-spread-soft-constraint": {
-			replicaCount: 4,
+			expectedEvictedCount: 1,
+			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
 				LabelSelector: &metav1.LabelSelector{
 					MatchLabels: map[string]string{
@@ -64,7 +68,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			},
 		},
 		"test-rc-node-taints-policy-honor": {
-			replicaCount: 4,
+			expectedEvictedCount: 1,
+			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
 				LabelSelector: &metav1.LabelSelector{
 					MatchLabels: map[string]string{
@@ -78,7 +83,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			},
 		},
 		"test-rc-node-affinity-policy-ignore": {
-			replicaCount: 4,
+			expectedEvictedCount: 1,
+			replicaCount:         4,
 			topologySpreadConstraint: v1.TopologySpreadConstraint{
 				LabelSelector: &metav1.LabelSelector{
 					MatchLabels: map[string]string{
@@ -91,6 +97,21 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				WhenUnsatisfiable: v1.DoNotSchedule,
 			},
 		},
+		"test-rc-match-label-keys": {
+			expectedEvictedCount: 0,
+			replicaCount:         4,
+			topologySpreadConstraint: v1.TopologySpreadConstraint{
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"test": "match-label-keys",
+					},
+				},
+				MatchLabelKeys:    []string{appsv1.DefaultDeploymentUniqueLabelKey},
+				MaxSkew:           1,
+				TopologyKey:       zoneTopologyKey,
+				WhenUnsatisfiable: v1.DoNotSchedule,
+			},
+		},
 	}
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
@@ -158,10 +179,14 @@ func TestTopologySpreadConstraint(t *testing.T) {
t.Logf("Wait for terminating pods of %s to disappear", name) t.Logf("Wait for terminating pods of %s to disappear", name)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace) waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
if totalEvicted := podEvictor.TotalEvicted(); totalEvicted > 0 { if totalEvicted := podEvictor.TotalEvicted(); totalEvicted == tc.expectedEvictedCount {
t.Logf("Total of %d Pods were evicted for %s", totalEvicted, name) t.Logf("Total of %d Pods were evicted for %s", totalEvicted, name)
} else { } else {
t.Fatalf("Pods were not evicted for %s TopologySpreadConstraint", name) t.Fatalf("Expected %d evictions but got %d for %s TopologySpreadConstraint", tc.expectedEvictedCount, totalEvicted, name)
}
if tc.expectedEvictedCount == 0 {
return
} }
// Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread // Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread