Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)
topologySpreadConstraints: handle nodeTaintsPolicy and nodeAffinityPolicy constraints (#1218)
* Add handling for node eligibility
* Make tests buildable
* Update topologyspreadconstraint.go
* Fix failing test cases
* Squash test-case additions: correct a function name, refactor the duplicate TopoConstraint check logic, and add five test cases covering node-eligibility scenarios
* topologySpreadConstraints e2e: `nodeTaintsPolicy` and `nodeAffinityPolicy` constraints

Co-authored-by: Marc Power <marcpow@microsoft.com>
Co-authored-by: nitindagar0 <81955199+nitindagar0@users.noreply.github.com>
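For context on the two fields named in the title: `NodeAffinityPolicy` and `NodeTaintsPolicy` are optional `*v1.NodeInclusionPolicy` fields on `v1.TopologySpreadConstraint` that control whether a pod's node affinity/selector and a node's taints are considered when choosing the nodes that form the spread domains; this commit makes the descheduler honor them. A minimal sketch of a constraint that sets both fields follows; the label selector, topology key, and values are illustrative and not taken from this commit.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// policyRef returns a pointer to the given policy value, mirroring the
// nodeInclusionPolicyRef helper added in this commit's e2e test.
func policyRef(p v1.NodeInclusionPolicy) *v1.NodeInclusionPolicy { return &p }

func main() {
	constraint := v1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: v1.DoNotSchedule,
		LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "demo"}},
		// Honor: nodes whose taints the pod does not tolerate are excluded from the domains.
		NodeTaintsPolicy: policyRef(v1.NodeInclusionPolicyHonor),
		// Ignore: the pod's nodeAffinity/nodeSelector is not used to filter domains.
		NodeAffinityPolicy: policyRef(v1.NodeInclusionPolicyIgnore),
	}
	fmt.Printf("%+v\n", constraint)
}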
@@ -28,6 +28,8 @@ import (
 	"k8s.io/klog/v2"
 	utilpointer "k8s.io/utils/pointer"
 
+	v1helper "k8s.io/component-helpers/scheduling/corev1"
+	nodeaffinity "k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	"sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -79,6 +81,12 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
 	}, nil
 }
 
+type topologyConstraintSet struct {
+	constraint      v1.TopologySpreadConstraint
+	podNodeAffinity nodeaffinity.RequiredNodeAffinity
+	podTolerations  []v1.Toleration
+}
+
 // Name retrieves the plugin name
 func (d *RemovePodsViolatingTopologySpreadConstraint) Name() string {
 	return PluginName
@@ -132,20 +140,26 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
 		}
 
 		// ...where there is a topology constraint
-		namespaceTopologySpreadConstraints := []v1.TopologySpreadConstraint{}
+		namespaceTopologySpreadConstraints := []topologyConstraintSet{}
 		for _, pod := range namespacedPods[namespace] {
 			for _, constraint := range pod.Spec.TopologySpreadConstraints {
 				// Ignore topology constraints if they are not included
 				if !allowedConstraints.Has(constraint.WhenUnsatisfiable) {
 					continue
 				}
+				requiredSchedulingTerm := nodeaffinity.GetRequiredNodeAffinity(pod)
+				namespaceTopologySpreadConstraint := topologyConstraintSet{
+					constraint:      constraint,
+					podNodeAffinity: requiredSchedulingTerm,
+					podTolerations:  pod.Spec.Tolerations,
+				}
 				// Need to check v1.TopologySpreadConstraint deepEquality because
 				// v1.TopologySpreadConstraint has pointer fields
 				// and we don't need to go over duplicated constraints later on
-				if hasIdenticalConstraints(constraint, namespaceTopologySpreadConstraints) {
+				if hasIdenticalConstraints(namespaceTopologySpreadConstraint, namespaceTopologySpreadConstraints) {
 					continue
 				}
-				namespaceTopologySpreadConstraints = append(namespaceTopologySpreadConstraints, constraint)
+				namespaceTopologySpreadConstraints = append(namespaceTopologySpreadConstraints, namespaceTopologySpreadConstraint)
 			}
 		}
 		if len(namespaceTopologySpreadConstraints) == 0 {
@@ -153,13 +167,18 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
 		}
 
 		// 2. for each topologySpreadConstraint in that namespace
-		for _, constraint := range namespaceTopologySpreadConstraints {
+		for _, constraintSet := range namespaceTopologySpreadConstraints {
+			constraint := constraintSet.constraint
+			nodeAffinity := constraintSet.podNodeAffinity
+			tolerations := constraintSet.podTolerations
 			constraintTopologies := make(map[topologyPair][]*v1.Pod)
 			// pre-populate the topologyPair map with all the topologies available from the nodeMap
 			// (we can't just build it from existing pods' nodes because a topology may have 0 pods)
 			for _, node := range nodeMap {
 				if val, ok := node.Labels[constraint.TopologyKey]; ok {
-					constraintTopologies[topologyPair{key: constraint.TopologyKey, value: val}] = make([]*v1.Pod, 0)
+					if matchNodeInclusionPolicies(&constraint, tolerations, node, nodeAffinity) {
+						constraintTopologies[topologyPair{key: constraint.TopologyKey, value: val}] = make([]*v1.Pod, 0)
+					}
 				}
 			}
 
@@ -202,7 +221,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
 				klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
 				continue
 			}
-			d.balanceDomains(podsForEviction, constraint, constraintTopologies, sumPods, nodes)
+			d.balanceDomains(podsForEviction, constraintSet, constraintTopologies, sumPods, nodes)
 		}
 	}
 
@@ -227,7 +246,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
 }
 
 // hasIdenticalConstraints checks if we already had an identical TopologySpreadConstraint in namespaceTopologySpreadConstraints slice
-func hasIdenticalConstraints(newConstraint v1.TopologySpreadConstraint, namespaceTopologySpreadConstraints []v1.TopologySpreadConstraint) bool {
+func hasIdenticalConstraints(newConstraint topologyConstraintSet, namespaceTopologySpreadConstraints []topologyConstraintSet) bool {
 	for _, constraint := range namespaceTopologySpreadConstraints {
 		if reflect.DeepEqual(newConstraint, constraint) {
 			return true
@@ -277,18 +296,20 @@ func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.Topol
 // (assuming even distribution by the scheduler of the evicted pods)
 func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
 	podsForEviction map[*v1.Pod]struct{},
-	constraint v1.TopologySpreadConstraint,
+	constraintSet topologyConstraintSet,
 	constraintTopologies map[topologyPair][]*v1.Pod,
 	sumPods float64,
 	nodes []*v1.Node,
 ) {
+	constraint := constraintSet.constraint
 	idealAvg := sumPods / float64(len(constraintTopologies))
 	isEvictable := d.handle.Evictor().Filter
 	sortedDomains := sortDomains(constraintTopologies, isEvictable)
 	getPodsAssignedToNode := d.handle.GetPodsAssignedToNodeFunc()
 	topologyBalanceNodeFit := utilpointer.BoolDeref(d.args.TopologyBalanceNodeFit, true)
 
-	nodesBelowIdealAvg := filterNodesBelowIdealAvg(nodes, sortedDomains, constraint.TopologyKey, idealAvg)
+	eligibleNodes := filterEligibleNodes(nodes, constraintSet)
+	nodesBelowIdealAvg := filterNodesBelowIdealAvg(eligibleNodes, sortedDomains, constraint.TopologyKey, idealAvg)
 
 	// i is the index for belowOrEqualAvg
 	// j is the index for aboveAvg
@@ -435,3 +456,43 @@ func comparePodsByPriority(iPod, jPod *v1.Pod) bool {
 		return true
 	}
 }
+
+// doNotScheduleTaintsFilterFunc returns the filter function that can
+// filter out the node taints that reject scheduling Pod on a Node.
+func doNotScheduleTaintsFilterFunc() func(t *v1.Taint) bool {
+	return func(t *v1.Taint) bool {
+		// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
+		return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
+	}
+}
+
+func filterEligibleNodes(nodes []*v1.Node, constraintSet topologyConstraintSet) []*v1.Node {
+	constraint := constraintSet.constraint
+	nodeAffinity := constraintSet.podNodeAffinity
+	tolerations := constraintSet.podTolerations
+	var eligibleNodes []*v1.Node
+	for _, node := range nodes {
+		if matchNodeInclusionPolicies(&constraint, tolerations, node, nodeAffinity) {
+			eligibleNodes = append(eligibleNodes, node)
+		}
+	}
+	return eligibleNodes
+}
+
+func matchNodeInclusionPolicies(tsc *v1.TopologySpreadConstraint, tolerations []v1.Toleration, node *v1.Node, require nodeaffinity.RequiredNodeAffinity) bool {
+	// Nil is equivalent to honor
+	if tsc.NodeAffinityPolicy == nil || *tsc.NodeAffinityPolicy == v1.NodeInclusionPolicyHonor {
+		// We ignore parsing errors here for backwards compatibility.
+		if match, _ := require.Match(node); !match {
+			return false
+		}
+	}
+
+	// Nil is equivalent to ignore
+	if tsc.NodeTaintsPolicy != nil && *tsc.NodeTaintsPolicy == v1.NodeInclusionPolicyHonor {
+		if _, untolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, tolerations, doNotScheduleTaintsFilterFunc()); untolerated {
+			return false
+		}
+	}
+	return true
+}
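The defaults in matchNodeInclusionPolicies above follow the kube-scheduler's semantics: a nil NodeAffinityPolicy behaves like Honor, while a nil NodeTaintsPolicy behaves like Ignore. A standalone sketch of the same eligibility rule, using the component-helpers packages the commit imports; the function name `eligible` and the sample objects are illustrative and not part of the plugin:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1helper "k8s.io/component-helpers/scheduling/corev1"
	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
)

// eligible reports whether a node should count as a spread domain for the
// given constraint, honoring the constraint's node inclusion policies.
func eligible(tsc v1.TopologySpreadConstraint, pod *v1.Pod, node *v1.Node) bool {
	// Nil NodeAffinityPolicy is treated as Honor.
	if tsc.NodeAffinityPolicy == nil || *tsc.NodeAffinityPolicy == v1.NodeInclusionPolicyHonor {
		if match, _ := nodeaffinity.GetRequiredNodeAffinity(pod).Match(node); !match {
			return false
		}
	}
	// Nil NodeTaintsPolicy is treated as Ignore.
	if tsc.NodeTaintsPolicy != nil && *tsc.NodeTaintsPolicy == v1.NodeInclusionPolicyHonor {
		noScheduleOrExecute := func(t *v1.Taint) bool {
			return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
		}
		if _, untolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, pod.Spec.Tolerations, noScheduleOrExecute); untolerated {
			return false
		}
	}
	return true
}

func main() {
	honor := v1.NodeInclusionPolicyHonor
	tsc := v1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: v1.DoNotSchedule,
		NodeTaintsPolicy:  &honor,
	}
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "tainted-node"},
		Spec:       v1.NodeSpec{Taints: []v1.Taint{{Key: "dedicated", Effect: v1.TaintEffectNoSchedule}}},
	}
	pod := &v1.Pod{} // no tolerations, no required node affinity
	// With NodeTaintsPolicy=Honor, the tainted node is not counted as a domain.
	fmt.Println(eligible(tsc, pod, node)) // false
}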
@@ -15,6 +15,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/events"
+	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
 	utilpointer "k8s.io/utils/pointer"
 
 	"sigs.k8s.io/descheduler/pkg/api"
@@ -303,12 +304,14 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				count:        1,
 				node:         "n1",
 				labels:       map[string]string{"foo": "bar"},
+				constraints:  getDefaultTopologyConstraints(1),
 				nodeSelector: map[string]string{"zone": "zoneA"},
 			},
 			{
-				count:  1,
-				node:   "n1",
-				labels: map[string]string{"foo": "bar"},
+				count:       1,
+				node:        "n1",
+				labels:      map[string]string{"foo": "bar"},
+				constraints: getDefaultTopologyConstraints(1),
 				nodeAffinity: &v1.Affinity{NodeAffinity: &v1.NodeAffinity{
 					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
 						{MatchExpressions: []v1.NodeSelectorRequirement{{Key: "foo", Values: []string{"bar"}, Operator: v1.NodeSelectorOpIn}}},
@@ -316,9 +319,10 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				}},
 			},
 			{
-				count:  1,
-				node:   "n1",
-				labels: map[string]string{"foo": "bar"},
+				count:       1,
+				node:        "n1",
+				constraints: getDefaultTopologyConstraints(1),
+				labels:      map[string]string{"foo": "bar"},
 			},
 		}),
 		expectedEvictedCount: 1,
@@ -326,6 +330,163 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{},
 			nodeFit:              true,
 		},
+		{
+			name: "6 domains, sizes [0,0,0,3,3,2], maxSkew=1, No pod is evicted since topology is balanced",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
+				test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
+
+				test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
+				test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
+				test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count:        3,
+					node:         "n4",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+				{
+					count:        3,
+					node:         "n5",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+				{
+					count:        2,
+					node:         "n6",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+			}),
+			expectedEvictedCount: 0,
+			namespaces:           []string{"ns1"},
+			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:              false,
+		},
+		{
+			name: "7 domains, sizes [0,0,0,3,3,2], maxSkew=1, two pods are evicted and moved to new node added",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
+				test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
+
+				test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
+				test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
+				test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
+				test.BuildTestNode("n7", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeD"; n.Labels["os"] = "windows" }),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count:        3,
+					node:         "n4",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+				{
+					count:        3,
+					node:         "n5",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+				{
+					count:        2,
+					node:         "n6",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+			}),
+			expectedEvictedCount: 2,
+			namespaces:           []string{"ns1"},
+			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:              false,
+		},
+		{
+			name: "6 domains, sizes [0,0,0,4,3,1], maxSkew=1, 1 pod is evicted from node 4",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
+				test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
+
+				test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
+				test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
+				test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count:        4,
+					node:         "n4",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+				{
+					count:        3,
+					node:         "n5",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+				{
+					count:        1,
+					node:         "n6",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+			}),
+			expectedEvictedCount: 1,
+			namespaces:           []string{"ns1"},
+			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:              false,
+		},
+		{
+			name: "6 domains, sizes [0,0,0,4,3,1], maxSkew=1, 0 pod is evicted as pods of node:winNodeA can only be scheduled on this node and nodeFit is true ",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
+				test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
+
+				test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
+				test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
+				test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count:        4,
+					node:         "n4",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"node": "winNodeA"},
+				},
+				{
+					count:        3,
+					node:         "n5",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+				{
+					count:        1,
+					node:         "n6",
+					labels:       map[string]string{"foo": "bar"},
+					constraints:  getDefaultNodeTopologyConstraints(1),
+					nodeSelector: map[string]string{"os": "windows"},
+				},
+			}),
+			expectedEvictedCount: 0,
+			namespaces:           []string{"ns1"},
+			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{},
+			nodeFit:              true,
+		},
 		{
 			name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods since selector matches multiple nodes",
 			nodes: []*v1.Node{
@@ -1371,6 +1532,17 @@ func getDefaultTopologyConstraints(maxSkew int32) []v1.TopologySpreadConstraint
 	}
 }
 
+func getDefaultNodeTopologyConstraints(maxSkew int32) []v1.TopologySpreadConstraint {
+	return []v1.TopologySpreadConstraint{
+		{
+			MaxSkew:           maxSkew,
+			TopologyKey:       "node",
+			WhenUnsatisfiable: v1.DoNotSchedule,
+			LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+		},
+	}
+}
+
 func TestCheckIdenticalConstraints(t *testing.T) {
 	newConstraintSame := v1.TopologySpreadConstraint{
 		MaxSkew: 2,
@@ -1413,7 +1585,19 @@ func TestCheckIdenticalConstraints(t *testing.T) {
 	}
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			isIdentical := hasIdenticalConstraints(tc.newConstraint, tc.namespaceTopologySpreadConstraints)
+			var constraintSets []topologyConstraintSet
+			for _, constraints := range tc.namespaceTopologySpreadConstraints {
+				constraintSets = append(constraintSets, topologyConstraintSet{
+					constraint:      constraints,
+					podNodeAffinity: nodeaffinity.RequiredNodeAffinity{},
+					podTolerations:  []v1.Toleration{},
+				})
+			}
+			isIdentical := hasIdenticalConstraints(topologyConstraintSet{
+				constraint:      tc.newConstraint,
+				podNodeAffinity: nodeaffinity.RequiredNodeAffinity{},
+				podTolerations:  []v1.Toleration{},
+			}, constraintSets)
 			if isIdentical != tc.expectedResult {
 				t.Errorf("Test error for description: %s. Expected result %v, got %v", tc.name, tc.expectedResult, isIdentical)
 			}
@@ -2,14 +2,13 @@ package e2e
 
 import (
 	"context"
-	"fmt"
 	"math"
 	"strings"
 	"testing"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
+	"k8s.io/apimachinery/pkg/labels"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
@@ -35,32 +34,69 @@ func TestTopologySpreadConstraint(t *testing.T) {
 	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
 
 	testCases := map[string]struct {
-		replicaCount int
-		maxSkew      int
-		labelKey     string
-		labelValue   string
-		constraint   v1.UnsatisfiableConstraintAction
+		replicaCount             int
+		topologySpreadConstraint v1.TopologySpreadConstraint
 	}{
 		"test-rc-topology-spread-hard-constraint": {
 			replicaCount: 4,
-			maxSkew:      1,
-			labelKey:     "test",
-			labelValue:   "topology-spread-hard-constraint",
-			constraint:   v1.DoNotSchedule,
+			topologySpreadConstraint: v1.TopologySpreadConstraint{
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"test": "topology-spread-hard-constraint",
+					},
+				},
+				MaxSkew:           1,
+				TopologyKey:       zoneTopologyKey,
+				WhenUnsatisfiable: v1.DoNotSchedule,
+			},
 		},
 		"test-rc-topology-spread-soft-constraint": {
 			replicaCount: 4,
-			maxSkew:      1,
-			labelKey:     "test",
-			labelValue:   "topology-spread-soft-constraint",
-			constraint:   v1.ScheduleAnyway,
+			topologySpreadConstraint: v1.TopologySpreadConstraint{
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"test": "topology-spread-soft-constraint",
+					},
+				},
+				MaxSkew:           1,
+				TopologyKey:       zoneTopologyKey,
+				WhenUnsatisfiable: v1.ScheduleAnyway,
+			},
 		},
+		"test-rc-node-taints-policy-honor": {
+			replicaCount: 4,
+			topologySpreadConstraint: v1.TopologySpreadConstraint{
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"test": "node-taints-policy-honor",
+					},
+				},
+				MaxSkew:           1,
+				NodeTaintsPolicy:  nodeInclusionPolicyRef(v1.NodeInclusionPolicyHonor),
+				TopologyKey:       zoneTopologyKey,
+				WhenUnsatisfiable: v1.DoNotSchedule,
+			},
+		},
+		"test-rc-node-affinity-policy-ignore": {
+			replicaCount: 4,
+			topologySpreadConstraint: v1.TopologySpreadConstraint{
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"test": "node-taints-policy-honor",
+					},
+				},
+				MaxSkew:            1,
+				NodeAffinityPolicy: nodeInclusionPolicyRef(v1.NodeInclusionPolicyIgnore),
+				TopologyKey:        zoneTopologyKey,
+				WhenUnsatisfiable:  v1.DoNotSchedule,
+			},
+		},
 	}
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
 			t.Logf("Creating RC %s with %d replicas", name, tc.replicaCount)
-			rc := RcByNameContainer(name, testNamespace.Name, int32(tc.replicaCount), map[string]string{tc.labelKey: tc.labelValue}, nil, "")
-			rc.Spec.Template.Spec.TopologySpreadConstraints = makeTopologySpreadConstraints(tc.maxSkew, tc.labelKey, tc.labelValue, tc.constraint)
+			rc := RcByNameContainer(name, testNamespace.Name, int32(tc.replicaCount), tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, nil, "")
+			rc.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{tc.topologySpreadConstraint}
 			if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
 				t.Fatalf("Error creating RC %s %v", name, err)
 			}
@@ -69,10 +105,10 @@ func TestTopologySpreadConstraint(t *testing.T) {
 
 			// Create a "Violator" RC that has the same label and is forced to be on the same node using a nodeSelector
 			violatorRcName := name + "-violator"
-			violatorCount := tc.maxSkew + 1
-			violatorRc := RcByNameContainer(violatorRcName, testNamespace.Name, int32(violatorCount), map[string]string{tc.labelKey: tc.labelValue}, nil, "")
+			violatorCount := tc.topologySpreadConstraint.MaxSkew + 1
+			violatorRc := RcByNameContainer(violatorRcName, testNamespace.Name, violatorCount, tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, nil, "")
 			violatorRc.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
-			rc.Spec.Template.Spec.TopologySpreadConstraints = makeTopologySpreadConstraints(tc.maxSkew, tc.labelKey, tc.labelValue, tc.constraint)
+			rc.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{tc.topologySpreadConstraint}
 			if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, violatorRc, metav1.CreateOptions{}); err != nil {
 				t.Fatalf("Error creating RC %s: %v", violatorRcName, err)
 			}
@@ -103,7 +139,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}
 
 			plugin, err := removepodsviolatingtopologyspreadconstraint.New(&removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
-				Constraints: []v1.UnsatisfiableConstraintAction{tc.constraint},
+				Constraints: []v1.UnsatisfiableConstraintAction{tc.topologySpreadConstraint.WhenUnsatisfiable},
 			},
 				&frameworkfake.HandleImpl{
 					ClientsetImpl: clientSet,
@@ -131,7 +167,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			// Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
 			waitForRCPodsRunning(ctx, t, clientSet, rc)
 
-			pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", tc.labelKey, tc.labelValue)})
+			listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(tc.topologySpreadConstraint.LabelSelector.MatchLabels).String()}
+			pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, listOptions)
 			if err != nil {
 				t.Errorf("Error listing pods for %s: %v", name, err)
 			}
@@ -146,26 +183,15 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}
 
 			min, max := getMinAndMaxPodDistribution(nodePodCountMap)
-			if max-min > tc.maxSkew {
-				t.Errorf("Pod distribution for %s is still violating the max skew of %d as it is %d", name, tc.maxSkew, max-min)
+			if max-min > int(tc.topologySpreadConstraint.MaxSkew) {
+				t.Errorf("Pod distribution for %s is still violating the max skew of %d as it is %d", name, tc.topologySpreadConstraint.MaxSkew, max-min)
 			}
 
-			t.Logf("Pods for %s were distributed in line with max skew of %d", name, tc.maxSkew)
+			t.Logf("Pods for %s were distributed in line with max skew of %d", name, tc.topologySpreadConstraint.MaxSkew)
 		})
 	}
 }
 
-func makeTopologySpreadConstraints(maxSkew int, labelKey, labelValue string, constraint v1.UnsatisfiableConstraintAction) []v1.TopologySpreadConstraint {
-	return []v1.TopologySpreadConstraint{
-		{
-			MaxSkew:           int32(maxSkew),
-			TopologyKey:       zoneTopologyKey,
-			WhenUnsatisfiable: constraint,
-			LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{labelKey: labelValue}},
-		},
-	}
-}
-
 func getMinAndMaxPodDistribution(nodePodCountMap map[string]int) (int, int) {
 	min := math.MaxInt32
 	max := math.MinInt32
@@ -180,3 +206,7 @@ func getMinAndMaxPodDistribution(nodePodCountMap map[string]int) (int, int) {
 
 	return min, max
 }
+
+func nodeInclusionPolicyRef(policy v1.NodeInclusionPolicy) *v1.NodeInclusionPolicy {
+	return &policy
+}