mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

(TopologySpread) Evict pods with selectors that match multiple nodes

Mike Dame
2021-03-11 10:04:59 -05:00
parent 0a11b5a138
commit af26b57e5e
2 changed files with 56 additions and 5 deletions
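
In short: before this change, any pod with a non-nil NodeSelector was skipped for eviction outright, even when that selector matched other nodes the pod could be rescheduled onto. After this change, a pod with a node selector or required node affinity is only skipped when it fits on no node besides its current one; the new test case below exercises exactly that, with a region=boston selector matching both zones so that two of the four pods on n1 can be evicted to fix the skew.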


@@ -289,11 +289,12 @@ func balanceDomains(
 		// however we still account for it "being evicted" so the algorithm can complete
 		// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
 		// but still try to get the required # of movePods (instead of just chopping that value off the slice above)
-		if aboveToEvict[k].Spec.NodeSelector != nil ||
-			(aboveToEvict[k].Spec.Affinity != nil &&
-				aboveToEvict[k].Spec.Affinity.NodeAffinity != nil &&
-				aboveToEvict[k].Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
-				nodesPodFitsOnBesidesCurrent(aboveToEvict[k], nodeMap) == 0) {
+		isRequiredDuringSchedulingIgnoredDuringExecution := aboveToEvict[k].Spec.Affinity != nil &&
+			aboveToEvict[k].Spec.Affinity.NodeAffinity != nil &&
+			aboveToEvict[k].Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil
+
+		if (aboveToEvict[k].Spec.NodeSelector != nil || isRequiredDuringSchedulingIgnoredDuringExecution) &&
+			nodesPodFitsOnBesidesCurrent(aboveToEvict[k], nodeMap) == 0 {
 			klog.V(2).InfoS("Ignoring pod for eviction due to node selector/affinity", "pod", klog.KObj(aboveToEvict[k]))
 			continue
 		}
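
To make the predicate change concrete, here is a minimal, self-contained sketch, not descheduler code: hasSelector and hasRequiredAffinity stand in for the pod's Spec.NodeSelector and required node-affinity checks, and otherFeasibleNodes stands in for the result of nodesPodFitsOnBesidesCurrent, which is assumed to count the nodes (besides the pod's current one) that the pod fits on.

package main

import "fmt"

// skipOld mirrors the old grouping: the feasibility count was only ANDed into the
// affinity branch, so any pod with a plain node selector short-circuited to "skip".
func skipOld(hasSelector, hasRequiredAffinity bool, otherFeasibleNodes int) bool {
	return hasSelector || (hasRequiredAffinity && otherFeasibleNodes == 0)
}

// skipNew mirrors the new grouping: a pod with a selector or required affinity is
// only skipped when it fits on no other node.
func skipNew(hasSelector, hasRequiredAffinity bool, otherFeasibleNodes int) bool {
	return (hasSelector || hasRequiredAffinity) && otherFeasibleNodes == 0
}

func main() {
	// The commit's scenario: a node selector that matches at least one other node.
	fmt.Println(skipOld(true, false, 1)) // true  -> pod was never considered for eviction
	fmt.Println(skipNew(true, false, 1)) // false -> pod can now be evicted
}

With the old grouping, a non-nil NodeSelector alone forced the skip regardless of how many nodes matched it; the new grouping applies the nodesPodFitsOnBesidesCurrent check to both the selector and the affinity case.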


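The hunk below adds a test case for this scenario. To run just that test locally, something like the following should work (the package path is an assumption about where TestTopologySpreadConstraint lives; adjust it to the actual test package):

go test ./pkg/descheduler/strategies/... -run TestTopologySpreadConstraint -v
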
@@ -281,6 +281,56 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			strategy: api.DeschedulerStrategy{},
 			namespaces: []string{"ns1"},
 		},
+		{
+			name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods since selector matches multiple nodes",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) {
+					n.Labels["zone"] = "zoneA"
+					n.Labels["region"] = "boston"
+				}),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
+					n.Labels["zone"] = "zoneB"
+					n.Labels["region"] = "boston"
+				}),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count: 1,
+					node: "n1",
+					labels: map[string]string{"foo": "bar"},
+					constraints: []v1.TopologySpreadConstraint{
+						{
+							MaxSkew: 1,
+							TopologyKey: "zone",
+							WhenUnsatisfiable: v1.DoNotSchedule,
+							LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+						},
+					},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+				{
+					count: 1,
+					node: "n1",
+					labels: map[string]string{"foo": "bar"},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+				{
+					count: 1,
+					node: "n1",
+					labels: map[string]string{"foo": "bar"},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+				{
+					count: 1,
+					node: "n1",
+					labels: map[string]string{"foo": "bar"},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+			}),
+			expectedEvictedCount: 2,
+			strategy: api.DeschedulerStrategy{},
+			namespaces: []string{"ns1"},
+		},
 		{
 			name: "3 domains, sizes [0, 1, 100], maxSkew=1, move 66 pods to get [34, 33, 34]",
 			nodes: []*v1.Node{