mirror of https://github.com/kubernetes-sigs/descheduler.git
add PreEvictionFilter extension to DefaultEvictor Plugin
@@ -44,7 +44,9 @@ type constraint func(pod *v1.Pod) error

 // This plugin is only meant to customize other actions (extension points) of the evictor,
 // like filtering, sorting, and other ones that might be relevant in the future
 type DefaultEvictor struct {
+	args        runtime.Object
 	constraints []constraint
+	handle      framework.Handle
 }

 // IsPodEvictableBasedOnPriority checks if the given pod is evictable based on priority resolved from pod Spec.
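Note: the evictor plugin interface itself is not part of this diff. A minimal sketch of the shape implied by the call sites below — the package placement and exact names are assumptions, with v1 being k8s.io/api/core/v1:

	package framework // illustrative placement, not necessarily the repo's file layout

	import v1 "k8s.io/api/core/v1"

	// Sketch only: the evictor extension points as this commit uses them.
	type Plugin interface {
		Name() string
	}

	// Filter screens eviction candidates cheaply; PreEvictionFilter runs a
	// final, potentially more expensive check right before an eviction.
	type EvictorPlugin interface {
		Plugin
		Filter(pod *v1.Pod) bool
		PreEvictionFilter(pod *v1.Pod) bool
	}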
@@ -66,6 +68,8 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error)
 	}

 	ev := &DefaultEvictor{}
+	ev.handle = handle
+	ev.args = defaultEvictorArgs

 	if defaultEvictorArgs.EvictFailedBarePods {
 		klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
@@ -125,18 +129,6 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error)
 			return nil
 		})
 	}
-	if defaultEvictorArgs.NodeFit {
-		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
-			nodes, err := nodeutil.ReadyNodes(context.TODO(), handle.ClientSet(), handle.SharedInformerFactory().Core().V1().Nodes(), defaultEvictorArgs.NodeSelector)
-			if err != nil {
-				return fmt.Errorf("could not list nodes when processing NodeFit")
-			}
-			if !nodeutil.PodFitsAnyOtherNode(handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
-				return fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable")
-			}
-			return nil
-		})
-	}
 	if defaultEvictorArgs.LabelSelector != nil && !defaultEvictorArgs.LabelSelector.Empty() {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			if !defaultEvictorArgs.LabelSelector.Matches(labels.Set(pod.Labels)) {
@@ -154,6 +146,23 @@ func (d *DefaultEvictor) Name() string {
 	return PluginName
 }

+func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
+	defaultEvictorArgs := d.args.(*DefaultEvictorArgs)
+	if defaultEvictorArgs.NodeFit {
+		nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes(), defaultEvictorArgs.NodeSelector)
+		if err != nil {
+			klog.V(1).ErrorS(fmt.Errorf("Pod fails the following checks"), "pod", klog.KObj(pod))
+			return false
+		}
+		if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
+			klog.V(1).ErrorS(fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable"), "pod", klog.KObj(pod))
+			return false
+		}
+		return true
+	}
+	return true
+}
+
 func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
 	checkErrs := []error{}
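The NodeFit check moves out of the constraint list (removed in the hunk above) and behind this new extension point, so a pod should only pass PreEvictionFilter if some *other* node could take it. The real nodeutil.PodFitsAnyOtherNode also weighs taints, tolerations, resources, and schedulability; a self-contained sketch of just the "other node" idea, with label maps standing in for Node and Pod objects:

	package main

	import "fmt"

	// fitsAnyOtherNode sketches the NodeFit intuition behind PreEvictionFilter:
	// evicting a pod only makes sense if at least one node other than its
	// current one matches the pod's node selector.
	func fitsAnyOtherNode(podSelector map[string]string, current string, nodes map[string]map[string]string) bool {
		for name, labels := range nodes {
			if name == current {
				continue // the node the pod already runs on does not count
			}
			fits := true
			for k, v := range podSelector {
				if labels[k] != v {
					fits = false
					break
				}
			}
			if fits {
				return true
			}
		}
		return false
	}

	func main() {
		nodes := map[string]map[string]string{
			"node1": {"datacenter": "east"},
			"node2": {"datacenter": "west"},
		}
		sel := map[string]string{"datacenter": "east"}
		// Only node1 matches, and the pod already runs there: eviction is pointless.
		fmt.Println(fitsAnyOtherNode(sel, "node1", nodes)) // false
	}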
@@ -30,6 +30,332 @@ import (
 	"sigs.k8s.io/descheduler/test"
 )

+func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
+	n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
+
+	nodeTaintKey := "hardware"
+	nodeTaintValue := "gpu"
+
+	nodeLabelKey := "datacenter"
+	nodeLabelValue := "east"
+
+	type testCase struct {
+		description             string
+		pods                    []*v1.Pod
+		nodes                   []*v1.Node
+		evictFailedBarePods     bool
+		evictLocalStoragePods   bool
+		evictSystemCriticalPods bool
+		priorityThreshold       *int32
+		nodeFit                 bool
+		result                  bool
+	}
+
+	testCases := []testCase{
+		{
+			description: "Pod with no tolerations running on normal node, all other nodes tainted",
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
+					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+				}),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+				test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+			},
+			evictLocalStoragePods:   false,
+			evictSystemCriticalPods: false,
+			nodeFit:                 true,
+			result:                  false,
+		}, {
+			description: "Pod with correct tolerations running on normal node, all other nodes tainted",
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
+					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+					pod.Spec.Tolerations = []v1.Toleration{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+				test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+			},
+			evictLocalStoragePods:   false,
+			evictSystemCriticalPods: false,
+			nodeFit:                 true,
+			result:                  true,
+		}, {
+			description: "Pod with incorrect node selector",
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
+					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+					pod.Spec.NodeSelector = map[string]string{
+						nodeLabelKey: "fail",
+					}
+				}),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+				test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+			},
+			evictLocalStoragePods:   false,
+			evictSystemCriticalPods: false,
+			nodeFit:                 true,
+			result:                  false,
+		}, {
+			description: "Pod with correct node selector",
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
+					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+					pod.Spec.NodeSelector = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+				test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+			},
+			evictLocalStoragePods:   false,
+			evictSystemCriticalPods: false,
+			nodeFit:                 true,
+			result:                  true,
+		}, {
+			description: "Pod with correct node selector, but only available node doesn't have enough CPU",
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
+					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+					pod.Spec.NodeSelector = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode("node2-TEST", 10, 16, 10, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+				test.BuildTestNode("node3-TEST", 10, 16, 10, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+			},
+			evictLocalStoragePods:   false,
+			evictSystemCriticalPods: false,
+			nodeFit:                 true,
+			result:                  false,
+		}, {
+			description: "Pod with correct node selector, and one node has enough memory",
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
+					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+					pod.Spec.NodeSelector = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+				test.BuildTestPod("node2-pod-10GB-mem", 20, 10, "node2", func(pod *v1.Pod) {
+					pod.ObjectMeta.Labels = map[string]string{
+						"test": "true",
+					}
+				}),
+				test.BuildTestPod("node3-pod-10GB-mem", 20, 10, "node3", func(pod *v1.Pod) {
+					pod.ObjectMeta.Labels = map[string]string{
+						"test": "true",
+					}
+				}),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+				test.BuildTestNode("node3", 100, 20, 10, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+			},
+			evictLocalStoragePods:   false,
+			evictSystemCriticalPods: false,
+			nodeFit:                 true,
+			result:                  true,
+		}, {
+			description: "Pod with correct node selector, but both nodes don't have enough memory",
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
+					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+					pod.Spec.NodeSelector = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+				test.BuildTestPod("node2-pod-10GB-mem", 10, 10, "node2", func(pod *v1.Pod) {
+					pod.ObjectMeta.Labels = map[string]string{
+						"test": "true",
+					}
+				}),
+				test.BuildTestPod("node3-pod-10GB-mem", 10, 10, "node3", func(pod *v1.Pod) {
+					pod.ObjectMeta.Labels = map[string]string{
+						"test": "true",
+					}
+				}),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+				test.BuildTestNode("node3", 100, 16, 10, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+			},
+			evictLocalStoragePods:   false,
+			evictSystemCriticalPods: false,
+			nodeFit:                 true,
+			result:                  false,
+		}, {
+			description: "Pod with incorrect node selector, but nodefit false, should still be evicted",
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
+					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+					pod.Spec.NodeSelector = map[string]string{
+						nodeLabelKey: "fail",
+					}
+				}),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+				test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+				}),
+			},
+			evictLocalStoragePods:   false,
+			evictSystemCriticalPods: false,
+			nodeFit:                 false,
+			result:                  true,
+		},
+	}
+
+	for _, test := range testCases {
+		t.Run(test.description, func(t *testing.T) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var objs []runtime.Object
+			for _, node := range test.nodes {
+				objs = append(objs, node)
+			}
+			for _, pod := range test.pods {
+				objs = append(objs, pod)
+			}
+
+			fakeClient := fake.NewSimpleClientset(objs...)
+
+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods()
+
+			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+			defaultEvictorArgs := &DefaultEvictorArgs{
+				EvictLocalStoragePods:   test.evictLocalStoragePods,
+				EvictSystemCriticalPods: test.evictSystemCriticalPods,
+				IgnorePvcPods:           false,
+				EvictFailedBarePods:     test.evictFailedBarePods,
+				PriorityThreshold: &api.PriorityThreshold{
+					Value: test.priorityThreshold,
+				},
+				NodeFit: test.nodeFit,
+			}
+
+			evictorPlugin, err := New(
+				defaultEvictorArgs,
+				&frameworkfake.HandleImpl{
+					ClientsetImpl:                 fakeClient,
+					GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+					SharedInformerFactoryImpl:     sharedInformerFactory,
+				})
+			if err != nil {
+				t.Fatalf("Unable to initialize the plugin: %v", err)
+			}
+
+			result := evictorPlugin.(framework.EvictorPlugin).PreEvictionFilter(test.pods[0])
+			if (result) != test.result {
+				t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
+			}
+		})
+	}
+}
+
 func TestDefaultEvictorFilter(t *testing.T) {
 	n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
 	lowPriority := int32(800)
@@ -38,8 +364,6 @@ func TestDefaultEvictorFilter(t *testing.T) {
 	nodeTaintKey := "hardware"
 	nodeTaintValue := "gpu"

-	nodeLabelKey := "datacenter"
-	nodeLabelValue := "east"
 	type testCase struct {
 		description             string
 		pods                    []*v1.Pod
@@ -348,7 +672,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
 			priorityThreshold: &lowPriority,
 			result:            true,
 		}, {
-			description: "Pod with no tolerations running on normal node, all other nodes tainted",
+			description: "Pod with no tolerations running on normal node, all other nodes tainted, no PreEvictionFilter, should ignore nodeFit",
 			pods: []*v1.Pod{
 				test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
 					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -377,195 +701,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
 			evictLocalStoragePods:   false,
 			evictSystemCriticalPods: false,
 			nodeFit:                 true,
 			result:                  false,
-		}, {
-			description: "Pod with correct tolerations running on normal node, all other nodes tainted",
-			pods: []*v1.Pod{
-				test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
-					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-					pod.Spec.Tolerations = []v1.Toleration{
-						{
-							Key:    nodeTaintKey,
-							Value:  nodeTaintValue,
-							Effect: v1.TaintEffectNoSchedule,
-						},
-					}
-				}),
-			},
-			nodes: []*v1.Node{
-				test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
-					node.Spec.Taints = []v1.Taint{
-						{
-							Key:    nodeTaintKey,
-							Value:  nodeTaintValue,
-							Effect: v1.TaintEffectNoSchedule,
-						},
-					}
-				}),
-				test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
-					node.Spec.Taints = []v1.Taint{
-						{
-							Key:    nodeTaintKey,
-							Value:  nodeTaintValue,
-							Effect: v1.TaintEffectNoSchedule,
-						},
-					}
-				}),
-			},
-			evictLocalStoragePods:   false,
-			evictSystemCriticalPods: false,
-			nodeFit:                 true,
-			result:                  true,
-		}, {
-			description: "Pod with incorrect node selector",
-			pods: []*v1.Pod{
-				test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
-					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-					pod.Spec.NodeSelector = map[string]string{
-						nodeLabelKey: "fail",
-					}
-				}),
-			},
-			nodes: []*v1.Node{
-				test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
-					node.ObjectMeta.Labels = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-				test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
-					node.ObjectMeta.Labels = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-			},
-			evictLocalStoragePods:   false,
-			evictSystemCriticalPods: false,
-			nodeFit:                 true,
-			result:                  false,
-		}, {
-			description: "Pod with correct node selector",
-			pods: []*v1.Pod{
-				test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
-					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-					pod.Spec.NodeSelector = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-			},
-			nodes: []*v1.Node{
-				test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
-					node.ObjectMeta.Labels = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-				test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
-					node.ObjectMeta.Labels = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-			},
-			evictLocalStoragePods:   false,
-			evictSystemCriticalPods: false,
-			nodeFit:                 true,
-			result:                  true,
-		}, {
-			description: "Pod with correct node selector, but only available node doesn't have enough CPU",
-			pods: []*v1.Pod{
-				test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
-					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-					pod.Spec.NodeSelector = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-			},
-			nodes: []*v1.Node{
-				test.BuildTestNode("node2-TEST", 10, 16, 10, func(node *v1.Node) {
-					node.ObjectMeta.Labels = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-				test.BuildTestNode("node3-TEST", 10, 16, 10, func(node *v1.Node) {
-					node.ObjectMeta.Labels = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-			},
-			evictLocalStoragePods:   false,
-			evictSystemCriticalPods: false,
-			nodeFit:                 true,
-			result:                  false,
-		}, {
-			description: "Pod with correct node selector, and one node has enough memory",
-			pods: []*v1.Pod{
-				test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
-					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-					pod.Spec.NodeSelector = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-				test.BuildTestPod("node2-pod-10GB-mem", 20, 10, "node2", func(pod *v1.Pod) {
-					pod.ObjectMeta.Labels = map[string]string{
-						"test": "true",
-					}
-				}),
-				test.BuildTestPod("node3-pod-10GB-mem", 20, 10, "node3", func(pod *v1.Pod) {
-					pod.ObjectMeta.Labels = map[string]string{
-						"test": "true",
-					}
-				}),
-			},
-			nodes: []*v1.Node{
-				test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
-					node.ObjectMeta.Labels = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-				test.BuildTestNode("node3", 100, 20, 10, func(node *v1.Node) {
-					node.ObjectMeta.Labels = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-			},
-			evictLocalStoragePods:   false,
-			evictSystemCriticalPods: false,
-			nodeFit:                 true,
-			result:                  true,
-		}, {
-			description: "Pod with correct node selector, but both nodes don't have enough memory",
-			pods: []*v1.Pod{
-				test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
-					pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-					pod.Spec.NodeSelector = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-				test.BuildTestPod("node2-pod-10GB-mem", 10, 10, "node2", func(pod *v1.Pod) {
-					pod.ObjectMeta.Labels = map[string]string{
-						"test": "true",
-					}
-				}),
-				test.BuildTestPod("node3-pod-10GB-mem", 10, 10, "node3", func(pod *v1.Pod) {
-					pod.ObjectMeta.Labels = map[string]string{
-						"test": "true",
-					}
-				}),
-			},
-			nodes: []*v1.Node{
-				test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
-					node.ObjectMeta.Labels = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-				test.BuildTestNode("node3", 100, 16, 10, func(node *v1.Node) {
-					node.ObjectMeta.Labels = map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					}
-				}),
-			},
-			evictLocalStoragePods:   false,
-			evictSystemCriticalPods: false,
-			nodeFit:                 true,
-			result:                  false,
 		},
 	}

@@ -596,14 +732,6 @@ func TestDefaultEvictorFilter(t *testing.T) {
 			sharedInformerFactory.Start(ctx.Done())
 			sharedInformerFactory.WaitForCacheSync(ctx.Done())

-			// var opts []func(opts *FilterOptions)
-			// if test.priorityThreshold != nil {
-			// 	opts = append(opts, WithPriorityThreshold(*test.priorityThreshold))
-			// }
-			// if test.nodeFit {
-			// 	opts = append(opts, WithNodeFit(true))
-			// }
-
 			defaultEvictorArgs := &DefaultEvictorArgs{
 				EvictLocalStoragePods:   test.evictLocalStoragePods,
 				EvictSystemCriticalPods: test.evictSystemCriticalPods,
@@ -627,7 +755,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
 			}

 			result := evictorPlugin.(framework.EvictorPlugin).Filter(test.pods[0])
-			if result != test.result {
+			if (result) != test.result {
 				t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
 			}
@@ -18,9 +18,10 @@ package nodeutilization

 import (
 	"context"
-	"sigs.k8s.io/descheduler/pkg/api"
 	"sort"

+	"sigs.k8s.io/descheduler/pkg/api"
+
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/klog/v2"
@@ -286,36 +287,38 @@ func evictPods(
 			continue
 		}

-		if podEvictor.Evict(ctx, pod, evictions.EvictOptions{}) {
-			klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
-
-			for name := range totalAvailableUsage {
-				if name == v1.ResourcePods {
-					nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
-					totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
-				} else {
-					quantity := utils.GetResourceRequestQuantity(pod, name)
-					nodeInfo.usage[name].Sub(quantity)
-					totalAvailableUsage[name].Sub(quantity)
-				}
-			}
-
-			keysAndValues := []interface{}{
-				"node", nodeInfo.node.Name,
-				"CPU", nodeInfo.usage[v1.ResourceCPU].MilliValue(),
-				"Mem", nodeInfo.usage[v1.ResourceMemory].Value(),
-				"Pods", nodeInfo.usage[v1.ResourcePods].Value(),
-			}
-			for name := range totalAvailableUsage {
-				if !nodeutil.IsBasicResource(name) {
-					keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
-				}
-			}
-
-			klog.V(3).InfoS("Updated node usage", keysAndValues...)
-			// check if pods can be still evicted
-			if !continueEviction(nodeInfo, totalAvailableUsage) {
-				break
+		if podEvictor.PreEvictionFilter(pod) {
+			if podEvictor.Evict(ctx, pod, evictions.EvictOptions{}) {
+				klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
+
+				for name := range totalAvailableUsage {
+					if name == v1.ResourcePods {
+						nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
+						totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
+					} else {
+						quantity := utils.GetResourceRequestQuantity(pod, name)
+						nodeInfo.usage[name].Sub(quantity)
+						totalAvailableUsage[name].Sub(quantity)
+					}
+				}
+
+				keysAndValues := []interface{}{
+					"node", nodeInfo.node.Name,
+					"CPU", nodeInfo.usage[v1.ResourceCPU].MilliValue(),
+					"Mem", nodeInfo.usage[v1.ResourceMemory].Value(),
+					"Pods", nodeInfo.usage[v1.ResourcePods].Value(),
+				}
+				for name := range totalAvailableUsage {
+					if !nodeutil.IsBasicResource(name) {
+						keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
+					}
+				}
+
+				klog.V(3).InfoS("Updated node usage", keysAndValues...)
+				// check if pods can be still evicted
+				if !continueEviction(nodeInfo, totalAvailableUsage) {
+					break
+				}
 			}
 		}
 	}
 	if podEvictor.NodeLimitExceeded(nodeInfo.node) {
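Unlike the strategies below, which fold PreEvictionFilter into their candidate filter, evictPods calls it explicitly at eviction time: candidate selection for usage accounting stays cheap, and the relatively costly ready-node listing runs only for pods actually reached before the usage target is met. A self-contained sketch of that eager-versus-lazy trade-off (strings stand in for pods; the counter stands in for the ReadyNodes lookup):

	package main

	import "fmt"

	func main() {
		candidates := []string{"p1", "p2", "p3", "p4"}
		lookups := 0
		preEvictionFilter := func(pod string) bool {
			lookups++ // stands in for nodeutil.ReadyNodes plus the fit check
			return true
		}

		// Lazy gating, as in evictPods: stop once enough pods were evicted;
		// later candidates never pay for the lookup.
		evicted := 0
		for _, pod := range candidates {
			if preEvictionFilter(pod) {
				evicted++
			}
			if evicted >= 1 {
				break // usage target met, like continueEviction returning false
			}
		}
		fmt.Printf("lookups=%d; eager wrapping would have done %d\n", lookups, len(candidates))
	}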
@@ -56,8 +56,9 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error)
 		excludedNamespaces = sets.NewString(podLifeTimeArgs.Namespaces.Exclude...)
 	}

+	// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
 	podFilter, err := podutil.NewOptions().
-		WithFilter(handle.Evictor().Filter).
+		WithFilter(podutil.WrapFilterFuncs(handle.Evictor().Filter, handle.Evictor().PreEvictionFilter)).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		WithLabelSelector(podLifeTimeArgs.LabelSelector).
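podutil.WrapFilterFuncs is used here but its definition is not part of this diff. A plausible, self-contained sketch of such an AND-combinator — the real signature presumably takes podutil.FilterFunc, i.e. func(*v1.Pod) bool; a string stands in for the pod to keep this runnable:

	package main

	import "fmt"

	// filterFunc mirrors the shape of podutil.FilterFunc with a string
	// standing in for *v1.Pod.
	type filterFunc func(pod string) bool

	// wrapFilterFuncs sketches what podutil.WrapFilterFuncs plausibly does:
	// combine filters with AND, short-circuiting on the first rejection.
	func wrapFilterFuncs(filters ...filterFunc) filterFunc {
		return func(pod string) bool {
			for _, f := range filters {
				if f != nil && !f(pod) {
					return false
				}
			}
			return true
		}
	}

	func main() {
		filter := func(pod string) bool { return pod != "bare-pod" }
		preEvictionFilter := func(pod string) bool { return pod != "unmovable-pod" }

		combined := wrapFilterFuncs(filter, preEvictionFilter)
		fmt.Println(combined("app-pod"))       // true: passes both checks
		fmt.Println(combined("unmovable-pod")) // false: rejected by the second
	}

The same one-line change repeats in the strategy constructors below; each can merge the two phases because it evaluates its pod filter only on pods it is about to evict anyway.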
@@ -71,8 +71,9 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error)
 		excludedNamespaces = sets.NewString(removeDuplicatesArgs.Namespaces.Exclude...)
 	}

+	// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
 	podFilter, err := podutil.NewOptions().
-		WithFilter(handle.Evictor().Filter).
+		WithFilter(podutil.WrapFilterFuncs(handle.Evictor().Filter, handle.Evictor().PreEvictionFilter)).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		BuildFilterFunc()
@@ -57,8 +57,9 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error)
 		excludedNamespaces = sets.NewString(failedPodsArgs.Namespaces.Exclude...)
 	}

+	// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
 	podFilter, err := podutil.NewOptions().
-		WithFilter(handle.Evictor().Filter).
+		WithFilter(podutil.WrapFilterFuncs(handle.Evictor().Filter, handle.Evictor().PreEvictionFilter)).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		WithLabelSelector(failedPodsArgs.LabelSelector).
@@ -57,8 +57,9 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error)
 		excludedNamespaces = sets.NewString(tooManyRestartsArgs.Namespaces.Exclude...)
 	}

+	// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
 	podFilter, err := podutil.NewOptions().
-		WithFilter(handle.Evictor().Filter).
+		WithFilter(podutil.WrapFilterFuncs(handle.Evictor().Filter, handle.Evictor().PreEvictionFilter)).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		WithLabelSelector(tooManyRestartsArgs.LabelSelector).
@@ -92,7 +92,7 @@ loop:
 		podutil.SortPodsBasedOnPriorityLowToHigh(pods)
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
-			if checkPodsWithAntiAffinityExist(pods[i], pods) && d.handle.Evictor().Filter(pods[i]) {
+			if checkPodsWithAntiAffinityExist(pods[i], pods) && d.handle.Evictor().Filter(pods[i]) && d.handle.Evictor().PreEvictionFilter(pods[i]) {
 				if d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{}) {
 					// Since the current pod is evicted all other pods which have anti-affinity with this
 					// pod need not be evicted.
@@ -54,8 +54,9 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error)
 		excludedNamespaces = sets.NewString(nodeAffinityArgs.Namespaces.Exclude...)
 	}

+	// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
 	podFilter, err := podutil.NewOptions().
-		WithFilter(handle.Evictor().Filter).
+		WithFilter(podutil.WrapFilterFuncs(handle.Evictor().Filter, handle.Evictor().PreEvictionFilter)).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		WithLabelSelector(nodeAffinityArgs.LabelSelector).
@@ -57,8 +57,9 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error)
 		excludedNamespaces = sets.NewString(nodeTaintsArgs.Namespaces.Exclude...)
 	}

+	// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
 	podFilter, err := podutil.NewOptions().
-		WithFilter(handle.Evictor().Filter).
+		WithFilter(podutil.WrapFilterFuncs(handle.Evictor().Filter, handle.Evictor().PreEvictionFilter)).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		WithLabelSelector(nodeTaintsArgs.LabelSelector).
@@ -215,7 +215,10 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
 			if !d.podFilter(pod) {
 				continue
 			}
-			d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
+
+			if d.handle.Evictor().PreEvictionFilter(pod) {
+				d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
+			}
 			if d.handle.Evictor().NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
 				nodeLimitExceeded[pod.Spec.NodeName] = true
 			}
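Because PreEvictionFilter is an extension point rather than a hard-coded NodeFit check, an alternative evictor plugin can supply its own last-moment veto while reusing the same gating order the strategies above follow. A hypothetical, self-contained illustration — none of these names come from the repo:

	package main

	import "fmt"

	// pod stands in for *v1.Pod so the sketch runs on its own.
	type pod struct{ name string }

	// freezeEvictor is a hypothetical evictor plugin: it admits candidates in
	// Filter but vetoes every eviction while a maintenance freeze is active.
	type freezeEvictor struct{ frozen bool }

	func (e *freezeEvictor) Filter(p pod) bool            { return true }
	func (e *freezeEvictor) PreEvictionFilter(p pod) bool { return !e.frozen }

	func main() {
		e := &freezeEvictor{frozen: true}
		p := pod{name: "web-1"}
		// Same gating order the strategies in this commit use.
		if e.Filter(p) && e.PreEvictionFilter(p) {
			fmt.Println("evicting", p.name)
		} else {
			fmt.Println("skipping", p.name) // frozen: nothing is evicted
		}
	}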