Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)

Merge pull request #892 from JaneLiuL/master

    bring removeduplicates to plugin
@@ -69,3 +69,12 @@ type RemovePodsHavingTooManyRestartsArgs struct {
 	PodRestartThreshold     int32
 	IncludingInitContainers bool
 }
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type RemoveDuplicatesArgs struct {
+	metav1.TypeMeta
+
+	Namespaces        *api.Namespaces
+	ExcludeOwnerKinds []string
+}
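RemoveDuplicatesArgs is the typed replacement for the loosely typed v1alpha1 StrategyParameters. A minimal sketch of constructing the args directly, with illustrative values not taken from this diff (componentconfig is sigs.k8s.io/descheduler/pkg/apis/componentconfig, api is sigs.k8s.io/descheduler/pkg/api):

	args := &componentconfig.RemoveDuplicatesArgs{
		Namespaces:        &api.Namespaces{Include: []string{"dev"}}, // illustrative
		ExcludeOwnerKinds: []string{"ReplicaSet"},                    // illustrative
	}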
@@ -95,3 +95,10 @@ func validatePodRestartThreshold(podRestartThreshold int32) error {
 	}
 	return nil
 }
+
+func ValidateRemoveDuplicatesArgs(args *componentconfig.RemoveDuplicatesArgs) error {
+	// At most one of include/exclude can be set
+	return errorsAggregate(
+		validateNamespaceArgs(args.Namespaces),
+	)
+}
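errorsAggregate and validateNamespaceArgs are pre-existing helpers in this validation file, shared with the other plugin arg validators. A hedged sketch of the namespace check's expected semantics, mirroring the validation removed from duplicates.go further down in this commit:

	// Sketch only; the real helper already exists in the validation package.
	func validateNamespaceArgsSketch(namespaces *api.Namespaces) error {
		// At most one of Include/Exclude can be set.
		if namespaces != nil && len(namespaces.Include) > 0 && len(namespaces.Exclude) > 0 {
			return fmt.Errorf("only one of Include/Exclude namespaces can be set")
		}
		return nil
	}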
@@ -54,6 +54,41 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoveDuplicatesArgs) DeepCopyInto(out *RemoveDuplicatesArgs) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = new(api.Namespaces)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ExcludeOwnerKinds != nil {
+		in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicatesArgs.
+func (in *RemoveDuplicatesArgs) DeepCopy() *RemoveDuplicatesArgs {
+	if in == nil {
+		return nil
+	}
+	out := new(RemoveDuplicatesArgs)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RemoveDuplicatesArgs) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RemoveFailedPodsArgs) DeepCopyInto(out *RemoveFailedPodsArgs) {
 	*out = *in
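These three methods are deepcopy-gen output; with metav1.TypeMeta embedded, RemoveDuplicatesArgs now satisfies runtime.Object. An illustrative consequence of the generated slice copy (example values, not from this diff):

	orig := &componentconfig.RemoveDuplicatesArgs{ExcludeOwnerKinds: []string{"ReplicaSet"}}
	dup := orig.DeepCopy()
	dup.ExcludeOwnerKinds[0] = "StatefulSet" // dup owns its own backing array,
	fmt.Println(orig.ExcludeOwnerKinds[0])   // so this still prints "ReplicaSet"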
@@ -19,6 +19,7 @@ package descheduler
 import (
 	"context"
 	"fmt"
+	"k8s.io/klog/v2"
 
 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1beta1"
@@ -27,13 +28,11 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
+	corev1informers "k8s.io/client-go/informers/core/v1"
+	schedulingv1informers "k8s.io/client-go/informers/scheduling/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
-	"k8s.io/klog/v2"
 
-	corev1informers "k8s.io/client-go/informers/core/v1"
-	schedulingv1informers "k8s.io/client-go/informers/scheduling/v1"
-
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/metrics"
@@ -245,7 +244,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 	sharedInformerFactory.WaitForCacheSync(ctx.Done())
 
 	strategyFuncs := map[api.StrategyName]strategyFunction{
-		"RemoveDuplicates":                        strategies.RemoveDuplicatePods,
+		"RemoveDuplicates":                        nil,
 		"LowNodeUtilization":                      nodeutilization.LowNodeUtilization,
 		"HighNodeUtilization":                     nodeutilization.HighNodeUtilization,
 		"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
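RemoveDuplicates keeps its entry in strategyFuncs so the name still validates as a known strategy, but the nil value signals that execution has moved to the plugin shim in pluginsMap. A hedged sketch of the dispatch this implies (variable names hypothetical; the run loop itself is outside this hunk):

	// Hypothetical sketch of the fall-through, not quoted from descheduler.go.
	if f, ok := strategyFuncs[name]; ok {
		if f != nil {
			f(ctx, rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
		} else if pluginFn, ok := pluginsMap[string(name)]; ok {
			pluginFn(ctx, nodes, strategy.Params, handle) // new plugin path
		}
	}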
@@ -25,6 +25,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
 	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation"
 	"sigs.k8s.io/descheduler/pkg/framework"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
@@ -129,4 +130,25 @@ var pluginsMap = map[string]func(ctx context.Context, nodes []*v1.Node, params *
 			klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodshavingtoomanyrestarts.PluginName)
 		}
 	},
+	"RemoveDuplicates": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
+		args := &componentconfig.RemoveDuplicatesArgs{
+			Namespaces: params.Namespaces,
+		}
+		if params.RemoveDuplicates != nil {
+			args.ExcludeOwnerKinds = params.RemoveDuplicates.ExcludeOwnerKinds
+		}
+		if err := validation.ValidateRemoveDuplicatesArgs(args); err != nil {
+			klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removeduplicates.PluginName)
+			return
+		}
+		pg, err := removeduplicates.New(args, handle)
+		if err != nil {
+			klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removeduplicates.PluginName)
+			return
+		}
+		status := pg.(framework.BalancePlugin).Balance(ctx, nodes)
+		if status != nil && status.Err != nil {
+			klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removeduplicates.PluginName)
+		}
+	},
 }
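Every pluginsMap shim follows the same shape: translate StrategyParameters into typed args, validate, construct via New, then invoke the extension point. Condensed as a hypothetical helper (note it logs status.Err; the closure above passes err, which is necessarily nil at that point):

	// Hypothetical condensation of the closure pattern; not part of the commit.
	func runBalance(ctx context.Context, nodes []*v1.Node, args runtime.Object, handle framework.Handle) {
		pg, err := removeduplicates.New(args, handle)
		if err != nil {
			klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removeduplicates.PluginName)
			return
		}
		if status := pg.(framework.BalancePlugin).Balance(ctx, nodes); status != nil && status.Err != nil {
			klog.V(1).ErrorS(status.Err, "plugin finished with error", "pluginName", removeduplicates.PluginName)
		}
	}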
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,90 +14,94 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package strategies
+package removeduplicates
 
 import (
 	"context"
 	"fmt"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"math"
 	"reflect"
+	"sigs.k8s.io/descheduler/pkg/utils"
 	"sort"
 	"strings"
 
 	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
-	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 
-	"sigs.k8s.io/descheduler/pkg/api"
+	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
+	"sigs.k8s.io/descheduler/pkg/framework"
 )
 
-func validateRemoveDuplicatePodsParams(params *api.StrategyParameters) error {
-	if params == nil {
-		return nil
-	}
-	// At most one of include/exclude can be set
-	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
-		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
-	}
-	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
-		return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
-	}
+const PluginName = "RemoveDuplicates"
 
-	return nil
+// RemoveDuplicatePods removes the duplicate pods on node. This plugin evicts all duplicate pods on node.
+// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
+// namespace, and have at least one container with the same image.
+// As of now, this plugin won't evict daemonsets, mirror pods, critical pods and pods with local storages.
+
+type RemoveDuplicates struct {
+	handle    framework.Handle
+	args      *componentconfig.RemoveDuplicatesArgs
+	podFilter podutil.FilterFunc
+}
+
+var _ framework.Plugin = &RemoveDuplicates{}
+var _ framework.BalancePlugin = &RemoveDuplicates{}
 
 type podOwner struct {
 	namespace, kind, name string
 	imagesHash            string
 }
 
-// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
-// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
-// namespace, and have at least one container with the same image.
-// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemoveDuplicatePods(
-	ctx context.Context,
-	client clientset.Interface,
-	strategy api.DeschedulerStrategy,
-	nodes []*v1.Node,
-	podEvictor *evictions.PodEvictor,
-	evictorFilter *evictions.EvictorFilter,
-	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
-) {
-	if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
-		klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
-		return
+// New builds plugin from its arguments while passing a handle
+func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+	removeDuplicatesArgs, ok := args.(*componentconfig.RemoveDuplicatesArgs)
+	if !ok {
+		return nil, fmt.Errorf("want args to be of type RemoveDuplicatesArgs, got %T", args)
 	}
 
 	var includedNamespaces, excludedNamespaces sets.String
-	if strategy.Params != nil && strategy.Params.Namespaces != nil {
-		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
-		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
+	if removeDuplicatesArgs.Namespaces != nil {
+		includedNamespaces = sets.NewString(removeDuplicatesArgs.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(removeDuplicatesArgs.Namespaces.Exclude...)
 	}
 
+	podFilter, err := podutil.NewOptions().
+		WithFilter(handle.Evictor().Filter).
+		WithNamespaces(includedNamespaces).
+		WithoutNamespaces(excludedNamespaces).
+		BuildFilterFunc()
+	if err != nil {
+		return nil, fmt.Errorf("error initializing pod filter function: %v", err)
+	}
+
+	return &RemoveDuplicates{
+		handle:    handle,
+		args:      removeDuplicatesArgs,
+		podFilter: podFilter,
+	}, nil
+}
+
+// Name retrieves the plugin name
+func (r *RemoveDuplicates) Name() string {
+	return PluginName
+}
+
+// Balance extension point implementation for the plugin
+func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
 	duplicatePods := make(map[podOwner]map[string][]*v1.Pod)
 	ownerKeyOccurence := make(map[podOwner]int32)
 	nodeCount := 0
 	nodeMap := make(map[string]*v1.Node)
 
-	podFilter, err := podutil.NewOptions().
-		WithFilter(evictorFilter.Filter).
-		WithNamespaces(includedNamespaces).
-		WithoutNamespaces(excludedNamespaces).
-		BuildFilterFunc()
-	if err != nil {
-		klog.ErrorS(err, "Error initializing pod filter function")
-		return
-	}
-
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-		pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
+		pods, err := podutil.ListPodsOnANode(node.Name, r.handle.GetPodsAssignedToNodeFunc(), r.podFilter)
 		if err != nil {
 			klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
 			continue
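New and Balance are the two halves of the plugin contract: the constructor type-asserts the args, prebuilds the namespace-aware pod filter, and captures the handle; Balance then does the work RemoveDuplicatePods used to do. The framework.Handle surface this file relies on, inferred from the calls in this diff rather than quoted from the framework package:

	// Inferred sketch; the authoritative definitions live in sigs.k8s.io/descheduler/pkg/framework.
	type Handle interface {
		Evictor() Evictor                                             // Filter, Evict, NodeLimitExceeded are used in this file
		GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc // backs ListPodsOnANode
	}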
@@ -120,7 +124,8 @@ func RemoveDuplicatePods(
 		duplicateKeysMap := map[string][][]string{}
 		for _, pod := range pods {
 			ownerRefList := podutil.OwnerRef(pod)
-			if hasExcludedOwnerRefKind(ownerRefList, strategy) || len(ownerRefList) == 0 {
+
+			if len(ownerRefList) == 0 || hasExcludedOwnerRefKind(ownerRefList, r.args.ExcludeOwnerKinds) {
 				continue
 			}
 			podContainerKeys := make([]string, 0, len(ownerRefList)*len(pod.Spec.Containers))
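Flipping the condition to check len(ownerRefList) == 0 first lets hasExcludedOwnerRefKind take the excluded kinds directly; podContainerKeys, one entry per (owner reference, container) pair, is what ultimately decides whether two pods are duplicates. A hedged sketch of that key construction, whose body sits just past this hunk's context (separator and field order are illustrative):

	// Hedged sketch; the real loop lives below this hunk in the full file.
	for _, ownerRef := range ownerRefList {
		for _, container := range pod.Spec.Containers {
			podContainerKeys = append(podContainerKeys,
				strings.Join([]string{pod.Namespace, ownerRef.Kind, ownerRef.Name, container.Image}, "/"))
		}
	}
	sort.Strings(podContainerKeys)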
@@ -183,6 +188,7 @@ func RemoveDuplicatePods(
 
 	// 1. how many pods can be evicted to respect uniform placement of pods among viable nodes?
 	for ownerKey, podNodes := range duplicatePods {
+
 		targetNodes := getTargetNodes(podNodes, nodes)
 
 		klog.V(2).InfoS("Adjusting feasible nodes", "owner", ownerKey, "from", nodeCount, "to", len(targetNodes))
@@ -200,24 +206,15 @@ func RemoveDuplicatePods(
 				// It's assumed all duplicated pods are in the same priority class
 				// TODO(jchaloup): check if the pod has a different node to lend to
 				for _, pod := range pods[upperAvg-1:] {
-					podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{})
-					if podEvictor.NodeLimitExceeded(nodeMap[nodeName]) {
+					r.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
+					if r.handle.Evictor().NodeLimitExceeded(nodeMap[nodeName]) {
 						continue loop
 					}
 				}
 			}
 		}
 	}
-}
 
-func getNodeAffinityNodeSelector(pod *v1.Pod) *v1.NodeSelector {
-	if pod.Spec.Affinity == nil {
-		return nil
-	}
-	if pod.Spec.Affinity.NodeAffinity == nil {
-		return nil
-	}
-	return pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
+	return nil
 }
 
 func getTargetNodes(podNodes map[string][]*v1.Pod, nodes []*v1.Node) []*v1.Node {
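The eviction math behind pods[upperAvg-1:]: distribute an owner's pods uniformly over the viable nodes, keeping at most ceil(total/nodes) per node; the per-node duplicate list excludes the original pod, hence the -1. A worked sketch with hypothetical numbers:

	// 5 pods of one owner, 3 viable target nodes.
	upperAvg := int(math.Ceil(float64(5) / float64(3))) // = 2: each node may keep 2
	// On a node holding 1 original + 3 duplicates, duplicates[upperAvg-1:]
	// selects 2 pods for eviction, leaving that node with 2 of the owner's pods.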
@@ -277,15 +274,27 @@ func getTargetNodes(podNodes map[string][]*v1.Pod, nodes []*v1.Node) []*v1.Node
 	return targetNodes
 }
 
-func hasExcludedOwnerRefKind(ownerRefs []metav1.OwnerReference, strategy api.DeschedulerStrategy) bool {
-	if strategy.Params == nil || strategy.Params.RemoveDuplicates == nil {
+func hasExcludedOwnerRefKind(ownerRefs []metav1.OwnerReference, excludeOwnerKinds []string) bool {
+	if len(excludeOwnerKinds) == 0 {
 		return false
 	}
-	exclude := sets.NewString(strategy.Params.RemoveDuplicates.ExcludeOwnerKinds...)
+
+	exclude := sets.NewString(excludeOwnerKinds...)
 	for _, owner := range ownerRefs {
 		if exclude.Has(owner.Kind) {
 			return true
 		}
 	}
+
 	return false
 }
+
+func getNodeAffinityNodeSelector(pod *v1.Pod) *v1.NodeSelector {
+	if pod.Spec.Affinity == nil {
+		return nil
+	}
+	if pod.Spec.Affinity.NodeAffinity == nil {
+		return nil
+	}
+	return pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
+}
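With the signature change, callers pass the excluded kinds directly instead of threading the whole strategy object through. Illustrative behavior (values not from this diff):

	refs := []metav1.OwnerReference{{Kind: "ReplicaSet", Name: "frontend"}}
	hasExcludedOwnerRefKind(refs, []string{"ReplicaSet"}) // true: owner kind is excluded
	hasExcludedOwnerRefKind(refs, nil)                    // false: no exclusions configured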
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,10 +14,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package strategies
+package removeduplicates
 
 import (
 	"context"
+	"k8s.io/client-go/tools/events"
+	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
+	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
 	"testing"
 
 	v1 "k8s.io/api/core/v1"
@@ -27,9 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
-	"k8s.io/client-go/tools/events"
 
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
@@ -187,105 +189,98 @@ func TestFindDuplicatePods(t *testing.T) {
 		pods                    []*v1.Pod
 		nodes                   []*v1.Node
 		expectedEvictedPodCount uint
-		strategy                api.DeschedulerStrategy
+		excludeOwnerKinds       []string
+		nodefit                 bool
 	}{
 		{
 			description:             "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
 			pods:                    []*v1.Pod{p1, p2, p3},
 			nodes:                   []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 1,
-			strategy:                api.DeschedulerStrategy{},
 		},
 		{
 			description:             "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
 			pods:                    []*v1.Pod{p1, p2, p3},
 			nodes:                   []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
-			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
+			excludeOwnerKinds:       []string{"ReplicaSet"},
 		},
 		{
 			description:             "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
 			pods:                    []*v1.Pod{p8, p9, p10},
 			nodes:                   []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 1,
-			strategy:                api.DeschedulerStrategy{},
 		},
 		{
 			description:             "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
 			pods:                    []*v1.Pod{p1, p2, p3, p8, p9, p10},
 			nodes:                   []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 2,
-			strategy:                api.DeschedulerStrategy{},
 		},
 		{
 			description:             "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
 			pods:                    []*v1.Pod{p4, p5, p6, p7},
 			nodes:                   []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
-			strategy:                api.DeschedulerStrategy{},
 		},
 		{
 			description:             "Test all Pods: 4 should be evicted.",
 			pods:                    []*v1.Pod{p1, p2, p3, p4, p5, p6, p7, p8, p9, p10},
 			nodes:                   []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 2,
-			strategy:                api.DeschedulerStrategy{},
 		},
 		{
 			description:             "Pods with the same owner but different images should not be evicted",
 			pods:                    []*v1.Pod{p11, p12},
 			nodes:                   []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
-			strategy:                api.DeschedulerStrategy{},
 		},
 		{
 			description:             "Pods with multiple containers should not match themselves",
 			pods:                    []*v1.Pod{p13},
 			nodes:                   []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
-			strategy:                api.DeschedulerStrategy{},
 		},
 		{
 			description:             "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
 			pods:                    []*v1.Pod{p11, p13},
 			nodes:                   []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
-			strategy:                api.DeschedulerStrategy{},
 		},
 		{
 			description:             "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
 			pods:                    []*v1.Pod{p1, p2, p3},
 			nodes:                   []*v1.Node{node1, node3},
 			expectedEvictedPodCount: 0,
-			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
+			nodefit:                 true,
 		},
 		{
 			description:             "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
 			pods:                    []*v1.Pod{p15, p16, p17},
 			nodes:                   []*v1.Node{node1, node4},
 			expectedEvictedPodCount: 0,
-			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
+			nodefit:                 true,
 		},
 		{
 			description:             "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
 			pods:                    []*v1.Pod{p1, p2, p3},
 			nodes:                   []*v1.Node{node1, node5},
 			expectedEvictedPodCount: 0,
-			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
+			nodefit:                 true,
 		},
 		{
 			description:             "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available does not have enough CPU, and nodeFit set to true. 0 should be evicted.",
 			pods:                    []*v1.Pod{p1, p2, p3, p19},
 			nodes:                   []*v1.Node{node1, node6},
 			expectedEvictedPodCount: 0,
-			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
+			nodefit:                 true,
 		},
 		{
 			description:             "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available has enough CPU, and nodeFit set to true. 1 should be evicted.",
 			pods:                    []*v1.Pod{p1, p2, p3, p20},
 			nodes:                   []*v1.Node{node1, node6},
 			expectedEvictedPodCount: 1,
-			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
+			nodefit:                 true,
 		},
 	}
 
@@ -327,25 +322,37 @@ func TestFindDuplicatePods(t *testing.T) {
 			eventRecorder,
 		)
 
-		nodeFit := false
-		if testCase.strategy.Params != nil {
-			nodeFit = testCase.strategy.Params.NodeFit
+		nodeFit := testCase.nodefit
+
+		handle := &frameworkfake.HandleImpl{
+			ClientsetImpl:                 fakeClient,
+			GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+			PodEvictorImpl:                podEvictor,
+			EvictorFilterImpl: evictions.NewEvictorFilter(
+				testCase.nodes,
+				getPodsAssignedToNode,
+				false,
+				false,
+				false,
+				false,
+				evictions.WithNodeFit(nodeFit),
+			),
+			SharedInformerFactoryImpl: sharedInformerFactory,
 		}
 
-		evictorFilter := evictions.NewEvictorFilter(
-			testCase.nodes,
-			getPodsAssignedToNode,
-			false,
-			false,
-			false,
-			false,
-			evictions.WithNodeFit(nodeFit),
+		plugin, err := New(&componentconfig.RemoveDuplicatesArgs{
+			ExcludeOwnerKinds: testCase.excludeOwnerKinds,
+		},
+			handle,
 		)
+		if err != nil {
+			t.Fatalf("Unable to initialize the plugin: %v", err)
+		}
 
-		RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
-		podsEvicted := podEvictor.TotalEvicted()
-		if podsEvicted != testCase.expectedEvictedPodCount {
-			t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
+		plugin.(framework.BalancePlugin).Balance(ctx, testCase.nodes)
+		actualEvictedPodCount := podEvictor.TotalEvicted()
+		if actualEvictedPodCount != testCase.expectedEvictedPodCount {
+			t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", testCase.description, actualEvictedPodCount, testCase.expectedEvictedPodCount)
 		}
 	})
 }
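The four positional booleans handed to evictions.NewEvictorFilter are its eviction-policy toggles (evicting pods with local storage and similar knobs; exact order per the real signature), while WithNodeFit is a functional option; nodeFit now comes straight from the table's plain nodefit field instead of strategy params. A hedged sketch of that option shape:

	// Hedged sketch of a functional option like WithNodeFit; the real one
	// lives in pkg/descheduler/evictions and may differ in detail.
	func WithNodeFit(nodeFit bool) func(*Options) {
		return func(opts *Options) { opts.nodeFit = nodeFit }
	}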
@@ -481,7 +488,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 		pods                    []*v1.Pod
 		nodes                   []*v1.Node
 		expectedEvictedPodCount uint
-		strategy                api.DeschedulerStrategy
 	}{
 		{
 			description: "Evict pods uniformly",
@@ -503,7 +509,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("n2", 2000, 3000, 10, nil),
 				test.BuildTestNode("n3", 2000, 3000, 10, nil),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Evict pods uniformly with one node left out",
@@ -524,7 +529,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("n1", 2000, 3000, 10, nil),
 				test.BuildTestNode("n2", 2000, 3000, 10, nil),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Evict pods uniformly with two replica sets",
@@ -546,7 +550,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("n2", 2000, 3000, 10, nil),
 				test.BuildTestNode("n3", 2000, 3000, 10, nil),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Evict pods uniformly with two owner references",
@@ -578,7 +581,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("n2", 2000, 3000, 10, nil),
 				test.BuildTestNode("n3", 2000, 3000, 10, nil),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Evict pods with number of pods less than nodes",
@@ -593,7 +595,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("n2", 2000, 3000, 10, nil),
 				test.BuildTestNode("n3", 2000, 3000, 10, nil),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Evict pods with number of pods less than nodes, but ignore different pods with the same ownerref",
@@ -612,7 +613,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("n2", 2000, 3000, 10, nil),
 				test.BuildTestNode("n3", 2000, 3000, 10, nil),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Evict pods with a single pod with three nodes",
@@ -626,7 +626,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("n2", 2000, 3000, 10, nil),
 				test.BuildTestNode("n3", 2000, 3000, 10, nil),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Evict pods uniformly respecting taints",
@@ -651,7 +650,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("master2", 2000, 3000, 10, setMasterNoScheduleTaint),
 				test.BuildTestNode("master3", 2000, 3000, 10, setMasterNoScheduleTaint),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Evict pods uniformly respecting RequiredDuringSchedulingIgnoredDuringExecution node affinity",
@@ -676,7 +674,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("master2", 2000, 3000, 10, setMasterNoScheduleLabel),
 				test.BuildTestNode("master3", 2000, 3000, 10, setMasterNoScheduleLabel),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Evict pods uniformly respecting node selector",
@@ -701,7 +698,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("master2", 2000, 3000, 10, nil),
 				test.BuildTestNode("master3", 2000, 3000, 10, nil),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Evict pods uniformly respecting node selector with zero target nodes",
@@ -726,7 +722,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				test.BuildTestNode("master2", 2000, 3000, 10, nil),
 				test.BuildTestNode("master3", 2000, 3000, 10, nil),
 			},
-			strategy: api.DeschedulerStrategy{},
 		},
 	}
 
@@ -768,19 +763,32 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 			eventRecorder,
 		)
 
-		evictorFilter := evictions.NewEvictorFilter(
-			testCase.nodes,
-			getPodsAssignedToNode,
-			false,
-			false,
-			false,
-			false,
-		)
+		handle := &frameworkfake.HandleImpl{
+			ClientsetImpl:                 fakeClient,
+			GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+			PodEvictorImpl:                podEvictor,
+			EvictorFilterImpl: evictions.NewEvictorFilter(
+				testCase.nodes,
+				getPodsAssignedToNode,
+				false,
+				false,
+				false,
+				false,
+			),
+			SharedInformerFactoryImpl: sharedInformerFactory,
+		}
 
-		RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
-		podsEvicted := podEvictor.TotalEvicted()
-		if podsEvicted != testCase.expectedEvictedPodCount {
-			t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
+		plugin, err := New(&componentconfig.RemoveDuplicatesArgs{},
+			handle,
+		)
+		if err != nil {
+			t.Fatalf("Unable to initialize the plugin: %v", err)
+		}
+
+		plugin.(framework.BalancePlugin).Balance(ctx, testCase.nodes)
+		actualEvictedPodCount := podEvictor.TotalEvicted()
+		if actualEvictedPodCount != testCase.expectedEvictedPodCount {
+			t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", testCase.description, actualEvictedPodCount, testCase.expectedEvictedPodCount)
 		}
 	})
 }