Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)
Merge pull request #465 from damemi/skip-balanced-topology
Skip topology calculations if domains are already balanced
@@ -89,7 +89,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
     evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

     // 1. for each namespace for which there is Topology Constraint
-    // 2. for each TopologySpreadyConstraint in that namespace
+    // 2. for each TopologySpreadConstraint in that namespace
     // { find all evictable pods in that namespace
     // { 3. for each evictable pod in that namespace
     // 4. If the pod matches this TopologySpreadConstraint LabelSelector
@@ -155,7 +155,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
|
||||
}
|
||||
|
||||
// 3. for each evictable pod in that namespace
|
||||
// (this loop is where we count the number of pods per topologyValue that match this constraint'selector selector)
|
||||
// (this loop is where we count the number of pods per topologyValue that match this constraint's selector)
|
||||
var sumPods float64
|
||||
for i := range namespacePods.Items {
|
||||
// 4. if the pod matches this TopologySpreadConstraint LabelSelector
|
||||
@@ -163,7 +163,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
                     continue
                 }

-                // 5. If the pod'selector node matches this constraint'selector topologyKey, create a topoPair and add the pod
+                // 5. If the pod's node matches this constraint'selector topologyKey, create a topoPair and add the pod
                 node, ok := nodeMap[namespacePods.Items[i].Spec.NodeName]
                 if !ok {
                     // If ok is false, node is nil in which case node.Labels will panic. In which case a pod is yet to be scheduled. So it's safe to just continue here.
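The bookkeeping described in comments 3-5 groups matching pods by their node's value for the constraint's topologyKey. The following is a minimal, self-contained sketch of that grouping with toy data; topologyPair is redeclared locally as a stand-in for the descheduler's own type, and podZones and the pod names are illustrative, not part of the real code:

package main

import "fmt"

// topologyPair stands in for the key/value pair that identifies a topology
// domain (e.g. zone=zoneA); the real type is defined in the descheduler package.
type topologyPair struct {
    key   string
    value string
}

func main() {
    // Hypothetical pods and the "zone" label value of the node each one runs on.
    podZones := map[string]string{"pod-a": "zoneA", "pod-b": "zoneA", "pod-c": "zoneB"}

    // Group pod names per topology domain; the strategy does the same kind of
    // bookkeeping with a map[topologyPair][]*v1.Pod while walking namespacePods.
    domains := map[topologyPair][]string{}
    for pod, zone := range podZones {
        pair := topologyPair{key: "zone", value: zone}
        domains[pair] = append(domains[pair], pod)
    }

    for pair, pods := range domains {
        fmt.Printf("%s=%s has %d matching pods\n", pair.key, pair.value, len(pods))
    }
}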
@@ -179,6 +179,10 @@ func RemovePodsViolatingTopologySpreadConstraint(
                 constraintTopologies[topoPair] = append(constraintTopologies[topoPair], &namespacePods.Items[i])
                 sumPods++
             }
+            if topologyIsBalanced(constraintTopologies, constraint) {
+                klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
+                continue
+            }
             balanceDomains(podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodeMap)
         }
     }
@@ -191,6 +195,25 @@ func RemovePodsViolatingTopologySpreadConstraint(
     }
 }

+// topologyIsBalanced checks if any domains in the topology differ by more than the MaxSkew
+// this is called before any sorting or other calculations and is used to skip topologies that don't need to be balanced
+func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.TopologySpreadConstraint) bool {
+    minDomainSize := math.MaxInt32
+    maxDomainSize := math.MinInt32
+    for _, pods := range topology {
+        if len(pods) < minDomainSize {
+            minDomainSize = len(pods)
+        }
+        if len(pods) > maxDomainSize {
+            maxDomainSize = len(pods)
+        }
+        if int32(maxDomainSize-minDomainSize) > constraint.MaxSkew {
+            return false
+        }
+    }
+    return true
+}
+
 // balanceDomains determines how many pods (minimum) should be evicted from large domains to achieve an ideal balance within maxSkew
 // To actually determine how many pods need to be moved, we sort the topology domains in ascending length
 // [2, 5, 3, 8, 5, 7]
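To see when the new check fires, here is a minimal, self-contained sketch of the same min/max comparison on plain domain sizes; isBalanced is a toy stand-in for topologyIsBalanced (which operates on map[topologyPair][]*v1.Pod and a v1.TopologySpreadConstraint), and the sample sizes mirror the [2,1] and [3,1] test cases added below:

package main

import "fmt"

// isBalanced reports whether the largest and smallest domains differ by no
// more than maxSkew, i.e. whether the constraint can be skipped.
func isBalanced(domainSizes []int, maxSkew int) bool {
    if len(domainSizes) == 0 {
        return true
    }
    min, max := domainSizes[0], domainSizes[0]
    for _, n := range domainSizes {
        if n < min {
            min = n
        }
        if n > max {
            max = n
        }
    }
    return max-min <= maxSkew
}

func main() {
    fmt.Println(isBalanced([]int{2, 1}, 1)) // true: skew 1 <= maxSkew 1, constraint is skipped
    fmt.Println(isBalanced([]int{3, 1}, 1)) // false: skew 2 > maxSkew 1, balanceDomains runs
}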
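The sample list in the balanceDomains comment is easier to follow with the numbers worked out. How many pods are actually evicted is decided inside balanceDomains and is not reproduced here; this sketch only sorts the sample sizes and shows the even-spread average they balance toward:

package main

import (
    "fmt"
    "sort"
)

func main() {
    // Domain sizes taken from the comment above.
    sizes := []int{2, 5, 3, 8, 5, 7}
    sort.Ints(sizes)
    fmt.Println(sizes) // [2 3 5 5 7 8]

    // 30 matching pods across 6 domains average out to 5 per domain, so the
    // largest domains (7 and 8 here) are where evictions would come from.
    sum := 0
    for _, n := range sizes {
        sum += n
    }
    fmt.Printf("total=%d average=%.1f\n", sum, float64(sum)/float64(len(sizes)))
}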
@@ -25,6 +25,41 @@ func TestTopologySpreadConstraint(t *testing.T) {
         strategy api.DeschedulerStrategy
         namespaces []string
     }{
+        {
+            name: "2 domains, sizes [2,1], maxSkew=1, move 0 pods",
+            nodes: []*v1.Node{
+                test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
+                test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
+            },
+            pods: createTestPods([]testPodList{
+                {
+                    count: 1,
+                    node: "n1",
+                    labels: map[string]string{"foo": "bar"},
+                    constraints: []v1.TopologySpreadConstraint{
+                        {
+                            MaxSkew: 1,
+                            TopologyKey: "zone",
+                            WhenUnsatisfiable: v1.DoNotSchedule,
+                            LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+                        },
+                    },
+                },
+                {
+                    count: 1,
+                    node: "n1",
+                    labels: map[string]string{"foo": "bar"},
+                },
+                {
+                    count: 1,
+                    node: "n2",
+                    labels: map[string]string{"foo": "bar"},
+                },
+            }),
+            expectedEvictedCount: 0,
+            strategy: api.DeschedulerStrategy{},
+            namespaces: []string{"ns1"},
+        },
         {
             name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2]",
             nodes: []*v1.Node{
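The first test case added above ([2,1] with maxSkew=1) exercises the new skip path: the two zone domains already satisfy the constraint, so expectedEvictedCount is 0, while the [3,1] case that follows still requires one eviction. Assuming the repository layout at the time of this change (the strategy and its tests under pkg/descheduler/strategies), the table-driven test can be run with:

go test ./pkg/descheduler/strategies/ -run TestTopologySpreadConstraint -v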