
Migrate RemovePodsViolatingNodeAffinity into a plugin

Lucas Severo Alves
2022-08-04 19:05:03 +02:00
parent d109ea64d0
commit 0c3bf7f957
8 changed files with 282 additions and 176 deletions

View File

@@ -46,3 +46,14 @@ type RemoveFailedPodsArgs struct {
Reasons []string
IncludingInitContainers bool
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsViolatingNodeAffinityArgs holds arguments used to configure RemovePodsViolatingNodeAffinity plugin.
type RemovePodsViolatingNodeAffinityArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
NodeAffinityType []string
}
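
For illustration, a minimal sketch of constructing the new typed arguments in Go; the namespace and label values are hypothetical, and only NodeAffinityType is required by the validator below:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)

// newExampleArgs builds RemovePodsViolatingNodeAffinityArgs with
// illustrative values (namespace and label are hypothetical).
func newExampleArgs() *componentconfig.RemovePodsViolatingNodeAffinityArgs {
	return &componentconfig.RemovePodsViolatingNodeAffinityArgs{
		Namespaces: &api.Namespaces{
			Include: []string{"default"}, // hypothetical namespace
		},
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "demo"}, // hypothetical label
		},
		NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
	}
}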

View File

@@ -48,3 +48,17 @@ func validateCommonArgs(namespaces *api.Namespaces, labelSelector *metav1.LabelS
return nil
}
// ValidateRemovePodsViolatingNodeAffinityArgs validates RemovePodsViolatingNodeAffinity arguments
func ValidateRemovePodsViolatingNodeAffinityArgs(args *componentconfig.RemovePodsViolatingNodeAffinityArgs) error {
if args == nil || len(args.NodeAffinityType) == 0 {
return fmt.Errorf("nodeAffinityType needs to be set")
}
// At most one of include/exclude can be set
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
return nil
}
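
A quick sketch exercising the validator's two failure modes, assuming the args type introduced above; both calls print a non-nil error:

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation"
)

func main() {
	// Rejected: nodeAffinityType must be set.
	fmt.Println(validation.ValidateRemovePodsViolatingNodeAffinityArgs(
		&componentconfig.RemovePodsViolatingNodeAffinityArgs{}))

	// Rejected: Include and Exclude namespaces are mutually exclusive.
	fmt.Println(validation.ValidateRemovePodsViolatingNodeAffinityArgs(
		&componentconfig.RemovePodsViolatingNodeAffinityArgs{
			NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
			Namespaces: &api.Namespaces{
				Include: []string{"a"}, // hypothetical namespaces
				Exclude: []string{"b"},
			},
		}))
}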

View File

@@ -104,6 +104,46 @@ func (in *RemoveFailedPodsArgs) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingNodeAffinityArgs) DeepCopyInto(out *RemovePodsViolatingNodeAffinityArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingNodeAffinityArgs.
func (in *RemovePodsViolatingNodeAffinityArgs) DeepCopy() *RemovePodsViolatingNodeAffinityArgs {
if in == nil {
return nil
}
out := new(RemovePodsViolatingNodeAffinityArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsViolatingNodeAffinityArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingNodeTaintsArgs) DeepCopyInto(out *RemovePodsViolatingNodeTaintsArgs) {
*out = *in

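The generated deep-copy functions produce fully independent copies; a small sketch of that property (values hypothetical):

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)

func main() {
	orig := &componentconfig.RemovePodsViolatingNodeAffinityArgs{
		NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
	}
	cp := orig.DeepCopy()
	cp.NodeAffinityType[0] = "changed" // mutate the copy only

	// Prints the original value: DeepCopyInto allocated a fresh slice
	// before copying, so the receiver is untouched.
	fmt.Println(orig.NodeAffinityType[0])
}
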
View File

@@ -249,7 +249,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
"LowNodeUtilization": nodeutilization.LowNodeUtilization,
"HighNodeUtilization": nodeutilization.HighNodeUtilization,
"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
"RemovePodsViolatingNodeAffinity": strategies.RemovePodsViolatingNodeAffinity,
"RemovePodsViolatingNodeAffinity": nil,
"RemovePodsViolatingNodeTaints": nil,
"RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
"PodLifeTime": strategies.PodLifeTime,

View File

@@ -1,107 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)
func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
if params == nil || len(params.NodeAffinityType) == 0 {
return fmt.Errorf("NodeAffinityType is empty")
}
// At most one of include/exclude can be set
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
}
return nil
}
// RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
return
}
var includedNamespaces, excludedNamespaces sets.String
if strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
}
podFilter, err := podutil.NewOptions().
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(strategy.Params.LabelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
switch nodeAffinity {
case "requiredDuringSchedulingIgnoredDuringExecution":
loop:
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(
node.Name,
getPodsAssignedToNode,
podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
return evictorFilter.Filter(pod) &&
!nodeutil.PodFitsCurrentNode(getPodsAssignedToNode, pod, node) &&
nodeutil.PodFitsAnyNode(getPodsAssignedToNode, pod, nodes)
}),
)
if err != nil {
klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))
}
for _, pod := range pods {
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{})
if podEvictor.NodeLimitExceeded(node) {
continue loop
}
}
}
}
default:
klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
}
}
}

View File

@@ -26,6 +26,7 @@ import (
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation"
"sigs.k8s.io/descheduler/pkg/framework"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
)
@@ -82,4 +83,24 @@ var pluginsMap = map[string]func(ctx context.Context, nodes []*v1.Node, params *
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removefailedpods.PluginName)
}
},
"RemovePodsViolatingNodeAffinity": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
args := &componentconfig.RemovePodsViolatingNodeAffinityArgs{
Namespaces: params.Namespaces,
LabelSelector: params.LabelSelector,
NodeAffinityType: params.NodeAffinityType,
}
if err := validation.ValidateRemovePodsViolatingNodeAffinityArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodeaffinity.PluginName)
return
}
pg, err := removepodsviolatingnodeaffinity.New(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removepodsviolatingnodeaffinity.PluginName)
return
}
status := pg.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(status.Err, "plugin finished with error", "pluginName", removepodsviolatingnodeaffinity.PluginName)
}
},
}

View File

@@ -0,0 +1,119 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package removepodsviolatingnodeaffinity
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
)
const PluginName = "RemovePodsViolatingNodeAffinity"
// RemovePodsViolatingNodeAffinity evicts pods on the node which violate node affinity
type RemovePodsViolatingNodeAffinity struct {
handle framework.Handle
args *componentconfig.RemovePodsViolatingNodeAffinityArgs
podFilter podutil.FilterFunc
}
var _ framework.Plugin = &RemovePodsViolatingNodeAffinity{}
var _ framework.DeschedulePlugin = &RemovePodsViolatingNodeAffinity{}
// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
nodeAffinityArgs, ok := args.(*componentconfig.RemovePodsViolatingNodeAffinityArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeAffinityArgs, got %T", args)
}
var includedNamespaces, excludedNamespaces sets.String
if nodeAffinityArgs.Namespaces != nil {
includedNamespaces = sets.NewString(nodeAffinityArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(nodeAffinityArgs.Namespaces.Exclude...)
}
podFilter, err := podutil.NewOptions().
WithFilter(handle.Evictor().Filter).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(nodeAffinityArgs.LabelSelector).
BuildFilterFunc()
if err != nil {
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
}
return &RemovePodsViolatingNodeAffinity{
handle: handle,
podFilter: podFilter,
args: nodeAffinityArgs,
}, nil
}
// Name retrieves the plugin name
func (d *RemovePodsViolatingNodeAffinity) Name() string {
return PluginName
}
func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
for _, nodeAffinity := range d.args.NodeAffinityType {
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
switch nodeAffinity {
case "requiredDuringSchedulingIgnoredDuringExecution":
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(
node.Name,
d.handle.GetPodsAssignedToNodeFunc(),
podutil.WrapFilterFuncs(d.podFilter, func(pod *v1.Pod) bool {
return d.handle.Evictor().Filter(pod) &&
!nodeutil.PodFitsCurrentNode(d.handle.GetPodsAssignedToNodeFunc(), pod, node) &&
nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes)
}),
)
if err != nil {
return &framework.Status{
Err: fmt.Errorf("error listing pods on a node: %v", err),
}
}
for _, pod := range pods {
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
if d.handle.Evictor().NodeLimitExceeded(node) {
break
}
}
}
}
default:
klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
}
}
return nil
}
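
A hedged sketch of driving the new plugin directly, mirroring the migration shim above; it assumes the caller already has a framework.Handle (the descheduler wires one up internally), which is elided here:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
	"sigs.k8s.io/descheduler/pkg/framework"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
)

// runNodeAffinityPlugin builds the plugin from typed args and runs one
// descheduling pass over the given nodes.
func runNodeAffinityPlugin(ctx context.Context, handle framework.Handle, nodes []*v1.Node) error {
	args := &componentconfig.RemovePodsViolatingNodeAffinityArgs{
		NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
	}
	pg, err := removepodsviolatingnodeaffinity.New(args, handle)
	if err != nil {
		return fmt.Errorf("unable to initialize the plugin: %v", err)
	}
	if status := pg.(framework.DeschedulePlugin).Deschedule(ctx, nodes); status != nil && status.Err != nil {
		return status.Err
	}
	return nil
}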

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
package removepodsviolatingnodeaffinity
import (
"context"
@@ -27,33 +27,16 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/framework"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/test"
)
func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
NodeAffinityType: []string{
"requiredDuringSchedulingIgnoredDuringExecution",
},
},
}
requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy := api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
NodeAffinityType: []string{
"requiredDuringSchedulingIgnoredDuringExecution",
},
NodeFit: true,
},
}
nodeLabelKey := "kubernetes.io/desiredNode"
nodeLabelValue := "yes"
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10, nil)
@@ -109,28 +92,26 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
description string
nodes []*v1.Node
pods []*v1.Pod
strategy api.DeschedulerStrategy
expectedEvictedPodCount uint
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
args componentconfig.RemovePodsViolatingNodeAffinityArgs
nodefit bool
}{
{
description: "Invalid strategy type, should not evict any pods",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
NodeAffinityType: []string{
"requiredDuringSchedulingRequiredDuringExecution",
},
},
description: "Invalid Affinity type, should not evict any pods",
args: componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingRequiredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
},
{
description: "Pod is correctly scheduled on node, no eviction expected",
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
description: "Pod is correctly scheduled on node, no eviction expected",
args: componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil),
nodes: []*v1.Node{nodeWithLabels},
@@ -138,38 +119,48 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
args: componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
args: componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminting",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
args: componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should not be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should not be evicted",
expectedEvictedPodCount: 1,
args: componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting",
expectedEvictedPodCount: 1,
args: componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
@@ -177,17 +168,23 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
expectedEvictedPodCount: 0,
strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
args: componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
nodefit: true,
},
{
description: "Pod is scheduled on node without matching labels, and node where pod fits is available, should evict",
expectedEvictedPodCount: 0,
strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: &uint1,
args: componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: &uint1,
nodefit: true,
},
}
@@ -229,22 +226,33 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
eventRecorder,
)
nodeFit := false
if tc.strategy.Params != nil {
nodeFit = tc.strategy.Params.NodeFit
handle := &frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
PodEvictorImpl: podEvictor,
SharedInformerFactoryImpl: sharedInformerFactory,
EvictorFilterImpl: evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
evictions.WithNodeFit(tc.nodefit),
),
}
evictorFilter := evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
evictions.WithNodeFit(nodeFit),
plugin, err := New(
&componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: tc.args.NodeAffinityType,
},
handle,
)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)