Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00
feat: Implement preferredDuringSchedulingIgnoredDuringExecution for RemovePodsViolatingNodeAffinity (#1210)
* feat: Implement preferredDuringSchedulingIgnoredDuringExecution for RemovePodsViolatingNodeAffinity

The descheduler can now detect and evict pods that are not optimally allocated according to their "preferred..." node affinity. A pod is only evicted if it can be scheduled on a node that scores higher, in terms of preferred node affinity, than the node it is currently running on. This is activated by enabling the RemovePodsViolatingNodeAffinity plugin and passing "preferredDuringSchedulingIgnoredDuringExecution" in the args.

For example, imagine a pod that prefers nodes with the label "key1: value1" with a weight of 10. If this pod is scheduled on a node that does not have the "key1: value1" label, but there is another node that has this label and could host the pod, the descheduler will evict the pod.

Another effect of this commit is that the RemovePodsViolatingNodeAffinity plugin no longer removes pods that do not fit on their current node for reasons other than violating the node affinity. Previously, enabling this plugin could evict pods that were running on tainted nodes without the necessary tolerations.

This commit also fixes the wording of some tests in node_affinity_test.go, along with some parameters and expectations of those tests, which were wrong.

* Optimization on RemovePodsViolatingNodeAffinity

Before checking whether a pod can be evicted or scheduled somewhere else, we now first check that it has the corresponding nodeAffinity field defined; otherwise the pod is immediately discarded as a candidate. Apart from that, the method that calculates the weight a pod gives to a node based on its preferred node affinity has been renamed to better reflect what it does.
Committed by GitHub · parent 1be0ab2bd1 · commit 31704047c5
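As a concrete illustration of the scenario described in the commit message, the following minimal Go sketch builds the preferred node affinity from the example (a single term for key1: value1 with weight 10) and lists the NodeAffinityType values that enable each mode. The v1 types are the upstream Kubernetes API and the NodeAffinityType strings match the ones used by the tests in this diff; wiring these values into a full descheduler policy is outside the scope of this commit and is only assumed here.

package example

import v1 "k8s.io/api/core/v1"

// podPreferredAffinity mirrors the commit-message example: the pod prefers
// nodes labelled key1=value1 with a weight of 10.
var podPreferredAffinity = &v1.Affinity{
	NodeAffinity: &v1.NodeAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
			{
				Weight: 10,
				Preference: v1.NodeSelectorTerm{
					MatchExpressions: []v1.NodeSelectorRequirement{
						{Key: "key1", Operator: v1.NodeSelectorOpIn, Values: []string{"value1"}},
					},
				},
			},
		},
	},
}

// nodeAffinityTypes are the values passed through the plugin args
// (RemovePodsViolatingNodeAffinityArgs.NodeAffinityType in this diff);
// the second entry switches on the new preferred-affinity handling.
var nodeAffinityTypes = []string{
	"requiredDuringSchedulingIgnoredDuringExecution",
	"preferredDuringSchedulingIgnoredDuringExecution",
}

On a node without key1=value1 this pod scores 0 against its own preference, while any candidate node carrying the label scores 10, which is exactly the comparison the new preferred-affinity branch performs before evicting.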
@@ -26,6 +26,7 @@ import (
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils"
)

const PluginName = "RemovePodsViolatingNodeAffinity"

@@ -78,40 +79,68 @@ func (d *RemovePodsViolatingNodeAffinity) Name() string {

func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
for _, nodeAffinity := range d.args.NodeAffinityType {
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
var err *frameworktypes.Status = nil

// The pods that we'll evict must be evictable. For example, the current number of replicas
// must be greater than the pdb.minValue.
// The pods must be able to get scheduled on a different node. Otherwise, it doesn't make much
// sense to evict them.
switch nodeAffinity {
case "requiredDuringSchedulingIgnoredDuringExecution":
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))

pods, err := podutil.ListPodsOnANode(
node.Name,
d.handle.GetPodsAssignedToNodeFunc(),
podutil.WrapFilterFuncs(d.podFilter, func(pod *v1.Pod) bool {
return d.handle.Evictor().Filter(pod) &&
!nodeutil.PodFitsCurrentNode(d.handle.GetPodsAssignedToNodeFunc(), pod, node) &&
nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes)
}),
)
if err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error listing pods on a node: %v", err),
}
}

for _, pod := range pods {
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
if d.handle.Evictor().NodeLimitExceeded(node) {
break
}
}
}
// In this specific case, the pod must also violate the nodeSelector to be evicted
filterFunc := func(pod *v1.Pod, node *v1.Node, nodes []*v1.Node) bool {
return utils.PodHasNodeAffinity(pod, utils.RequiredDuringSchedulingIgnoredDuringExecution) &&
d.handle.Evictor().Filter(pod) &&
nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) &&
!nodeutil.PodMatchNodeSelector(pod, node)
}
err = d.processNodes(ctx, nodes, filterFunc)
case "preferredDuringSchedulingIgnoredDuringExecution":
// In this specific case, the pod must have a better fit on another node than
// in the current one based on the preferred node affinity
filterFunc := func(pod *v1.Pod, node *v1.Node, nodes []*v1.Node) bool {
return utils.PodHasNodeAffinity(pod, utils.PreferredDuringSchedulingIgnoredDuringExecution) &&
d.handle.Evictor().Filter(pod) &&
nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) &&
(nodeutil.GetBestNodeWeightGivenPodPreferredAffinity(pod, nodes) > nodeutil.GetNodeWeightGivenPodPreferredAffinity(pod, node))
}
err = d.processNodes(ctx, nodes, filterFunc)
default:
klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
}

if err != nil {
return err
}
}
return nil
}

func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, nodes []*v1.Node, filterFunc func(*v1.Pod, *v1.Node, []*v1.Node) bool) *frameworktypes.Status {
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))

// Potentially evictable pods
pods, err := podutil.ListPodsOnANode(
node.Name,
d.handle.GetPodsAssignedToNodeFunc(),
podutil.WrapFilterFuncs(d.podFilter, func(pod *v1.Pod) bool {
return filterFunc(pod, node, nodes)
}),
)
if err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error listing pods on a node: %v", err),
}
}

for _, pod := range pods {
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
if d.handle.Evictor().NodeLimitExceeded(node) {
break
}
}
}
return nil
}
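The preferred-affinity branch above relies on nodeutil.GetBestNodeWeightGivenPodPreferredAffinity and nodeutil.GetNodeWeightGivenPodPreferredAffinity, whose implementation is not part of this diff. The sketch below is a simplified, hypothetical stand-in for those helpers, limited to the In operator and ignoring MatchFields, that shows the intended semantics: sum the weights of the preferred terms a node satisfies, and take the best such sum over all candidate nodes.

package example

import v1 "k8s.io/api/core/v1"

// nodePreferredAffinityWeight is a simplified, hypothetical version of what
// GetNodeWeightGivenPodPreferredAffinity is expected to do: sum the weights of
// the pod's preferred scheduling terms whose match expressions the node's
// labels satisfy. Only the "In" operator is handled in this sketch.
func nodePreferredAffinityWeight(pod *v1.Pod, node *v1.Node) int32 {
	if pod.Spec.Affinity == nil || pod.Spec.Affinity.NodeAffinity == nil {
		return 0
	}
	var total int32
	for _, term := range pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
		matched := true
		for _, req := range term.Preference.MatchExpressions {
			if req.Operator != v1.NodeSelectorOpIn {
				matched = false
				break
			}
			found := false
			for _, val := range req.Values {
				if node.Labels[req.Key] == val {
					found = true
					break
				}
			}
			if !found {
				matched = false
				break
			}
		}
		if matched {
			total += term.Weight
		}
	}
	return total
}

// bestPreferredAffinityWeight mirrors GetBestNodeWeightGivenPodPreferredAffinity:
// the highest weight any candidate node (possibly including the current one)
// can offer the pod.
func bestPreferredAffinityWeight(pod *v1.Pod, nodes []*v1.Node) int32 {
	var best int32
	for _, node := range nodes {
		if w := nodePreferredAffinityWeight(pod, node); w > best {
			best = w
		}
	}
	return best
}

For the commit-message example (one term, weight 10), a pod on an unlabelled node scores 0 while a labelled candidate scores 10, so the "best > current" condition holds and the pod becomes an eviction candidate; if no candidate carries the label, both sides are 0 and the pod stays put.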
@@ -48,26 +48,49 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
unschedulableNodeWithLabels.Spec.Unschedulable = true

addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time) []*v1.Pod {
addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time, affinityType string) []*v1.Pod {
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
NodeAffinity: &v1.NodeAffinity{},
}

switch affinityType {
case "requiredDuringSchedulingIgnoredDuringExecution":
podWithNodeAffinity.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
}
case "preferredDuringSchedulingIgnoredDuringExecution":
podWithNodeAffinity.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{
{
Weight: 10,
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
}
case "requiredDuringSchedulingRequiredDuringExecution":
default:
t.Fatalf("Invalid affinity type %s", affinityType)
}

pod1 := test.BuildTestPod("pod1", 100, 0, node.Name, nil)
@@ -77,6 +100,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

podWithNodeAffinity.DeletionTimestamp = deletionTimestamp
pod1.DeletionTimestamp = deletionTimestamp
pod2.DeletionTimestamp = deletionTimestamp

@@ -87,6 +111,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
}
}

var uint0 uint = 0
var uint1 uint = 1
tests := []struct {
description string
@@ -104,16 +129,25 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
NodeAffinityType: []string{"requiredDuringSchedulingRequiredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingRequiredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
},
{
description: "Pod is correctly scheduled on node, no eviction expected",
description: "Pod is correctly scheduled on node, no eviction expected [required affinity]",
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil),
pods: addPodsToNode(nodeWithLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels},
},
{
description: "Pod is correctly scheduled on node, no eviction expected [preferred affinity]",
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels},
},
{
@@ -122,66 +156,176 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
description: "Pod is scheduled on node without matching labels, another schedulable node available with better fit, should be evicted",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should be evicted [required affinity]",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminting",
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should be evicted [preferred affinity]",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should not be evicted",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 0, should be not evicted [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 0, should be not evicted [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminating [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminating [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should be evicted [required affinity]",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should be evicted [preferred affinity]",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 0, should not be evicted [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 0, should not be evicted [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
nodefit: true,
},
{
description: "Pod is scheduled on node without matching labels, and node where pod fits is available, should evict",
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
nodefit: true,
},
{
description: "Pod is scheduled on node without matching labels, and unschedulable node where pod could fit is available, should not evict [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: &uint1,
nodefit: true,
},
{
description: "Pod is scheduled on node without matching labels, and unschedulable node where pod could fit is available, should not evict [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: &uint1,
nodefit: true,
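The required-affinity test cases above expect zero evictions whenever the pod's required terms still match its node, because the required branch adds the !nodeutil.PodMatchNodeSelector(pod, node) condition. The real matcher lives in the descheduler's node utilities and handles every selector operator; the sketch below is a simplified, hypothetical version restricted to In requirements, shown only to illustrate the OR-of-terms, AND-of-expressions semantics behind that check.

package example

import v1 "k8s.io/api/core/v1"

// podRequiredAffinityMatches is a hypothetical, simplified stand-in for the
// node-selector check used by the required-affinity branch: it reports whether
// any of the pod's required node-selector terms is satisfied by the node's
// labels. Only "In" requirements are handled in this sketch.
func podRequiredAffinityMatches(pod *v1.Pod, node *v1.Node) bool {
	if pod.Spec.Affinity == nil ||
		pod.Spec.Affinity.NodeAffinity == nil ||
		pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
		return true // no required affinity, so nothing can be violated
	}
	terms := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
	for _, term := range terms {
		satisfied := true
		for _, req := range term.MatchExpressions {
			if req.Operator != v1.NodeSelectorOpIn {
				satisfied = false
				break
			}
			found := false
			for _, val := range req.Values {
				if node.Labels[req.Key] == val {
					found = true
					break
				}
			}
			if !found {
				satisfied = false
				break
			}
		}
		if satisfied {
			return true // terms are ORed: one satisfied term is enough
		}
	}
	return false
}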