From 0006fb039dfb9d36bde3cc17d9bf6573923f94ca Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Fri, 10 Jul 2020 14:45:12 +0200 Subject: [PATCH 1/5] ListPodsOnANode: have one function parameter per each line --- pkg/descheduler/pod/pods.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/descheduler/pod/pods.go b/pkg/descheduler/pod/pods.go index 8e7ad39dc..6c237ab6d 100644 --- a/pkg/descheduler/pod/pods.go +++ b/pkg/descheduler/pod/pods.go @@ -18,12 +18,13 @@ package pod import ( "context" + "sort" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" clientset "k8s.io/client-go/kubernetes" "sigs.k8s.io/descheduler/pkg/utils" - "sort" ) // ListPodsOnANode lists all of the pods on a node @@ -31,7 +32,12 @@ import ( // (Usually this is podEvictor.IsEvictable, in order to only list the evictable pods on a node, but can // be used by strategies to extend IsEvictable if there are further restrictions, such as with NodeAffinity). // The filter function should return true if the pod should be returned from ListPodsOnANode -func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, filter func(pod *v1.Pod) bool) ([]*v1.Pod, error) { +func ListPodsOnANode( + ctx context.Context, + client clientset.Interface, + node *v1.Node, + filter func(pod *v1.Pod) bool, +) ([]*v1.Pod, error) { fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)) if err != nil { return []*v1.Pod{}, err From 74f70fdbc954bd8be10f224d0422d98e67c57a00 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Sat, 11 Jul 2020 20:00:47 +0200 Subject: [PATCH 2/5] ListPodsOnANode: define Options type to pass various options Options like: - filter - included/excluded namespaces - labels --- pkg/descheduler/pod/pods.go | 22 ++++++++++++++++--- pkg/descheduler/pod/pods_test.go | 2 +- pkg/descheduler/strategies/duplicates.go | 2 +- .../strategies/lownodeutilization.go | 2 +- pkg/descheduler/strategies/node_affinity.go | 4 ++-- pkg/descheduler/strategies/node_taint.go | 2 +- .../strategies/pod_antiaffinity.go | 3 ++- pkg/descheduler/strategies/pod_lifetime.go | 2 +- pkg/descheduler/strategies/toomanyrestarts.go | 2 +- test/e2e/e2e_test.go | 4 ++-- 10 files changed, 31 insertions(+), 14 deletions(-) diff --git a/pkg/descheduler/pod/pods.go b/pkg/descheduler/pod/pods.go index 6c237ab6d..e2b3fa2fb 100644 --- a/pkg/descheduler/pod/pods.go +++ b/pkg/descheduler/pod/pods.go @@ -27,17 +27,33 @@ import ( "sigs.k8s.io/descheduler/pkg/utils" ) +type Options struct { + filter func(pod *v1.Pod) bool +} + +// WithFilter sets a pod filter. +// The filter function should return true if the pod should be returned from ListPodsOnANode +func WithFilter(filter func(pod *v1.Pod) bool) func(opts *Options) { + return func(opts *Options) { + opts.filter = filter + } +} + // ListPodsOnANode lists all of the pods on a node // It also accepts an optional "filter" function which can be used to further limit the pods that are returned. // (Usually this is podEvictor.IsEvictable, in order to only list the evictable pods on a node, but can // be used by strategies to extend IsEvictable if there are further restrictions, such as with NodeAffinity). 
-// The filter function should return true if the pod should be returned from ListPodsOnANode func ListPodsOnANode( ctx context.Context, client clientset.Interface, node *v1.Node, - filter func(pod *v1.Pod) bool, + opts ...func(opts *Options), ) ([]*v1.Pod, error) { + options := &Options{} + for _, opt := range opts { + opt(options) + } + fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)) if err != nil { return []*v1.Pod{}, err @@ -51,7 +67,7 @@ func ListPodsOnANode( pods := make([]*v1.Pod, 0) for i := range podList.Items { - if filter != nil && !filter(&podList.Items[i]) { + if options.filter != nil && !options.filter(&podList.Items[i]) { continue } pods = append(pods, &podList.Items[i]) diff --git a/pkg/descheduler/pod/pods_test.go b/pkg/descheduler/pod/pods_test.go index 9bff1d2b4..d67e042f2 100644 --- a/pkg/descheduler/pod/pods_test.go +++ b/pkg/descheduler/pod/pods_test.go @@ -67,7 +67,7 @@ func TestListPodsOnANode(t *testing.T) { } return true, nil, fmt.Errorf("Failed to list: %v", list) }) - pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node, nil) + pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node) if len(pods) != testCase.expectedPodCount { t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods)) } diff --git a/pkg/descheduler/strategies/duplicates.go b/pkg/descheduler/strategies/duplicates.go index 9a276f910..d14582352 100644 --- a/pkg/descheduler/strategies/duplicates.go +++ b/pkg/descheduler/strategies/duplicates.go @@ -46,7 +46,7 @@ func RemoveDuplicatePods( ) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v", node.Name) - pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable) + pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable)) if err != nil { klog.Errorf("error listing evictable pods on node %s: %+v", node.Name, err) continue diff --git a/pkg/descheduler/strategies/lownodeutilization.go b/pkg/descheduler/strategies/lownodeutilization.go index 8b715b27b..788775946 100644 --- a/pkg/descheduler/strategies/lownodeutilization.go +++ b/pkg/descheduler/strategies/lownodeutilization.go @@ -326,7 +326,7 @@ func sortNodesByUsage(nodes []NodeUsageMap) { func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap { npm := NodePodsMap{} for _, node := range nodes { - pods, err := podutil.ListPodsOnANode(ctx, client, node, nil) + pods, err := podutil.ListPodsOnANode(ctx, client, node) if err != nil { klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err) } else { diff --git a/pkg/descheduler/strategies/node_affinity.go b/pkg/descheduler/strategies/node_affinity.go index 87153bc38..a804b5e23 100644 --- a/pkg/descheduler/strategies/node_affinity.go +++ b/pkg/descheduler/strategies/node_affinity.go @@ -43,11 +43,11 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) - pods, err := podutil.ListPodsOnANode(ctx, client, node, func(pod *v1.Pod) bool { + pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(func(pod *v1.Pod) bool { return podEvictor.IsEvictable(pod) && !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) - }) + })) if err != nil { klog.Errorf("failed 
to get pods from %v: %v", node.Name, err) } diff --git a/pkg/descheduler/strategies/node_taint.go b/pkg/descheduler/strategies/node_taint.go index 4de9941e6..bc612890d 100644 --- a/pkg/descheduler/strategies/node_taint.go +++ b/pkg/descheduler/strategies/node_taint.go @@ -33,7 +33,7 @@ import ( func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) - pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable) + pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable)) if err != nil { //no pods evicted as error encountered retrieving evictable Pods return diff --git a/pkg/descheduler/strategies/pod_antiaffinity.go b/pkg/descheduler/strategies/pod_antiaffinity.go index 141df6dbd..943aa8fd8 100644 --- a/pkg/descheduler/strategies/pod_antiaffinity.go +++ b/pkg/descheduler/strategies/pod_antiaffinity.go @@ -18,6 +18,7 @@ package strategies import ( "context" + "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" @@ -33,7 +34,7 @@ import ( func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) - pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable) + pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable)) if err != nil { return } diff --git a/pkg/descheduler/strategies/pod_lifetime.go b/pkg/descheduler/strategies/pod_lifetime.go index c7fe49501..ae6c0208c 100644 --- a/pkg/descheduler/strategies/pod_lifetime.go +++ b/pkg/descheduler/strategies/pod_lifetime.go @@ -54,7 +54,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D } func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, maxAge uint, evictor *evictions.PodEvictor) []*v1.Pod { - pods, err := podutil.ListPodsOnANode(ctx, client, node, evictor.IsEvictable) + pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(evictor.IsEvictable)) if err != nil { return nil } diff --git a/pkg/descheduler/strategies/toomanyrestarts.go b/pkg/descheduler/strategies/toomanyrestarts.go index 5c65d7d0d..e0750c14f 100644 --- a/pkg/descheduler/strategies/toomanyrestarts.go +++ b/pkg/descheduler/strategies/toomanyrestarts.go @@ -38,7 +38,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter } for _, node := range nodes { klog.V(1).Infof("Processing node: %s", node.Name) - pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable) + pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable)) if err != nil { klog.Errorf("Error when list pods at node %s", node.Name) continue diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 848096f20..cdd85b8e1 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -324,7 +324,7 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, continue } // List all the pods on the current Node - podsOnANode, err := podutil.ListPodsOnANode(ctx, clientSet, node, podEvictor.IsEvictable) + podsOnANode, err 
:= podutil.ListPodsOnANode(ctx, clientSet, node, podutil.WithFilter(podEvictor.IsEvictable)) if err != nil { t.Errorf("Error listing pods on a node %v", err) } @@ -336,7 +336,7 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, } t.Log("Eviction of pods starting") startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer, podEvictor) - podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, leastLoadedNode, podEvictor.IsEvictable) + podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, leastLoadedNode, podutil.WithFilter(podEvictor.IsEvictable)) if err != nil { t.Errorf("Error listing pods on a node %v", err) } From 11f1333af7298577ae4fb61d11e5736ab7de281f Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Sat, 11 Jul 2020 20:21:28 +0200 Subject: [PATCH 3/5] ListPodsOnANode: allow to include/exclude namespaces Info: field selector is still not properly mocked so it's not possible to unit test it --- pkg/api/types.go | 12 +++- pkg/api/v1alpha1/types.go | 10 +++- pkg/api/v1alpha1/zz_generated.conversion.go | 38 +++++++++++++ pkg/api/v1alpha1/zz_generated.deepcopy.go | 27 +++++++++ pkg/api/zz_generated.deepcopy.go | 27 +++++++++ pkg/descheduler/pod/pods.go | 56 ++++++++++++++++++- pkg/descheduler/strategies/node_affinity.go | 34 ++++++++--- pkg/descheduler/strategies/node_taint.go | 32 ++++++++++- .../strategies/pod_antiaffinity.go | 28 +++++++++- pkg/descheduler/strategies/pod_lifetime.go | 35 ++++++++++-- pkg/descheduler/strategies/toomanyrestarts.go | 27 ++++++++- 11 files changed, 303 insertions(+), 23 deletions(-) diff --git a/pkg/api/types.go b/pkg/api/types.go index e80c780b5..fb3becbea 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -44,13 +44,23 @@ type DeschedulerStrategy struct { Params *StrategyParameters } -// Only one of its members may be specified +// Namespaces carries a list of included/excluded namespaces +// for which a given strategy is applicable +type Namespaces struct { + Include []string + Exclude []string +} + +// Besides Namespaces only one of its members may be specified +// TODO(jchaloup): move Namespaces to individual strategies once the policy +// version is bumped to v1alpha2 type StrategyParameters struct { NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds NodeAffinityType []string PodsHavingTooManyRestarts *PodsHavingTooManyRestarts MaxPodLifeTimeSeconds *uint RemoveDuplicates *RemoveDuplicates + Namespaces Namespaces } type Percentage float64 diff --git a/pkg/api/v1alpha1/types.go b/pkg/api/v1alpha1/types.go index df52ed1c2..aa1f60c8d 100644 --- a/pkg/api/v1alpha1/types.go +++ b/pkg/api/v1alpha1/types.go @@ -44,13 +44,21 @@ type DeschedulerStrategy struct { Params *StrategyParameters `json:"params,omitempty"` } -// Only one of its members may be specified +// Namespaces carries a list of included/excluded namespaces +// for which a given strategy is applicable. 
+type Namespaces struct { + Include []string `json:"include"` + Exclude []string `json:"exclude"` +} + +// Besides Namespaces only one of its members may be specified type StrategyParameters struct { NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"` NodeAffinityType []string `json:"nodeAffinityType,omitempty"` PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"` MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"` RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"` + Namespaces Namespaces `json:"namespaces"` } type Percentage float64 diff --git a/pkg/api/v1alpha1/zz_generated.conversion.go b/pkg/api/v1alpha1/zz_generated.conversion.go index 3fa76f749..d2a58a2eb 100644 --- a/pkg/api/v1alpha1/zz_generated.conversion.go +++ b/pkg/api/v1alpha1/zz_generated.conversion.go @@ -55,6 +55,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*api.Namespaces)(nil), (*Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_api_Namespaces_To_v1alpha1_Namespaces(a.(*api.Namespaces), b.(*Namespaces), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*NodeResourceUtilizationThresholds)(nil), (*api.NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(a.(*NodeResourceUtilizationThresholds), b.(*api.NodeResourceUtilizationThresholds), scope) }); err != nil { @@ -142,6 +152,28 @@ func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.Des return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s) } +func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error { + out.Include = *(*[]string)(unsafe.Pointer(&in.Include)) + out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude)) + return nil +} + +// Convert_v1alpha1_Namespaces_To_api_Namespaces is an autogenerated conversion function. +func Convert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error { + return autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in, out, s) +} + +func autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error { + out.Include = *(*[]string)(unsafe.Pointer(&in.Include)) + out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude)) + return nil +} + +// Convert_api_Namespaces_To_v1alpha1_Namespaces is an autogenerated conversion function. 
+func Convert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error { + return autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in, out, s) +} + func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error { out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds)) out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds)) @@ -214,6 +246,9 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts)) out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds)) out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates)) + if err := Convert_v1alpha1_Namespaces_To_api_Namespaces(&in.Namespaces, &out.Namespaces, s); err != nil { + return err + } return nil } @@ -228,6 +263,9 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts)) out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds)) out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates)) + if err := Convert_api_Namespaces_To_v1alpha1_Namespaces(&in.Namespaces, &out.Namespaces, s); err != nil { + return err + } return nil } diff --git a/pkg/api/v1alpha1/zz_generated.deepcopy.go b/pkg/api/v1alpha1/zz_generated.deepcopy.go index 832418cbd..f9707bb1d 100644 --- a/pkg/api/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/api/v1alpha1/zz_generated.deepcopy.go @@ -77,6 +77,32 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Namespaces) DeepCopyInto(out *Namespaces) { + *out = *in + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces. +func (in *Namespaces) DeepCopy() *Namespaces { + if in == nil { + return nil + } + out := new(Namespaces) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) { *out = *in @@ -216,6 +242,7 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) { *out = new(RemoveDuplicates) (*in).DeepCopyInto(*out) } + in.Namespaces.DeepCopyInto(&out.Namespaces) return } diff --git a/pkg/api/zz_generated.deepcopy.go b/pkg/api/zz_generated.deepcopy.go index 66d7cf02a..b8d8dc574 100644 --- a/pkg/api/zz_generated.deepcopy.go +++ b/pkg/api/zz_generated.deepcopy.go @@ -77,6 +77,32 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Namespaces) DeepCopyInto(out *Namespaces) { + *out = *in + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces. +func (in *Namespaces) DeepCopy() *Namespaces { + if in == nil { + return nil + } + out := new(Namespaces) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) { *out = *in @@ -216,6 +242,7 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) { *out = new(RemoveDuplicates) (*in).DeepCopyInto(*out) } + in.Namespaces.DeepCopyInto(&out.Namespaces) return } diff --git a/pkg/descheduler/pod/pods.go b/pkg/descheduler/pod/pods.go index e2b3fa2fb..cbb4f8c19 100644 --- a/pkg/descheduler/pod/pods.go +++ b/pkg/descheduler/pod/pods.go @@ -28,7 +28,9 @@ import ( ) type Options struct { - filter func(pod *v1.Pod) bool + filter func(pod *v1.Pod) bool + includedNamespaces []string + excludedNamespaces []string } // WithFilter sets a pod filter. @@ -39,6 +41,20 @@ func WithFilter(filter func(pod *v1.Pod) bool) func(opts *Options) { } } +// WithNamespaces sets included namespaces +func WithNamespaces(namespaces []string) func(opts *Options) { + return func(opts *Options) { + opts.includedNamespaces = namespaces + } +} + +// WithoutNamespaces sets excluded namespaces +func WithoutNamespaces(namespaces []string) func(opts *Options) { + return func(opts *Options) { + opts.excludedNamespaces = namespaces + } +} + // ListPodsOnANode lists all of the pods on a node // It also accepts an optional "filter" function which can be used to further limit the pods that are returned. 
// (Usually this is podEvictor.IsEvictable, in order to only list the evictable pods on a node, but can
@@ -54,18 +70,52 @@ func ListPodsOnANode(
 		opt(options)
 	}
 
-	fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
+	pods := make([]*v1.Pod, 0)
+
+	fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)
+
+	if len(options.includedNamespaces) > 0 {
+		fieldSelector, err := fields.ParseSelector(fieldSelectorString)
+		if err != nil {
+			return []*v1.Pod{}, err
+		}
+
+		for _, namespace := range options.includedNamespaces {
+			podList, err := client.CoreV1().Pods(namespace).List(ctx,
+				metav1.ListOptions{FieldSelector: fieldSelector.String()})
+			if err != nil {
+				return []*v1.Pod{}, err
+			}
+			for i := range podList.Items {
+				if options.filter != nil && !options.filter(&podList.Items[i]) {
+					continue
+				}
+				pods = append(pods, &podList.Items[i])
+			}
+		}
+		return pods, nil
+	}
+
+	if len(options.excludedNamespaces) > 0 {
+		for _, namespace := range options.excludedNamespaces {
+			fieldSelectorString += ",metadata.namespace!=" + namespace
+		}
+	}
+
+	fieldSelector, err := fields.ParseSelector(fieldSelectorString)
 	if err != nil {
 		return []*v1.Pod{}, err
 	}
+	// INFO(jchaloup): field selectors do not work properly with listers
+	// Once the descheduler switches to pod listers (through informers),
+	// we need to flip to client-side filtering.
 	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx, metav1.ListOptions{FieldSelector: fieldSelector.String()})
 	if err != nil {
 		return []*v1.Pod{}, err
 	}
-	pods := make([]*v1.Pod, 0)
 	for i := range podList.Items {
 		if options.filter != nil && !options.filter(&podList.Items[i]) {
 			continue
diff --git a/pkg/descheduler/strategies/node_affinity.go b/pkg/descheduler/strategies/node_affinity.go
index a804b5e23..5454ce947 100644
--- a/pkg/descheduler/strategies/node_affinity.go
+++ b/pkg/descheduler/strategies/node_affinity.go
@@ -18,6 +18,7 @@ package strategies
 import (
 	"context"
+	"fmt"
 	v1 "k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
@@ -29,10 +30,22 @@ import (
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 )
+func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
+	if params == nil || len(params.NodeAffinityType) == 0 {
+		return fmt.Errorf("NodeAffinityType is empty")
+	}
+	// At most one of include/exclude can be set
+	if len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
+		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
+	}
+
+	return nil
+}
+
 // RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
 func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
-	if strategy.Params == nil {
-		klog.V(1).Infof("NodeAffinityType not set")
+	if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
+		klog.V(1).Info(err)
 		return
 	}
 	for _, nodeAffinity := range strategy.Params.NodeAffinityType {
@@ -43,11 +56,18 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 	for _, node := range nodes {
 		klog.V(1).Infof("Processing node: %#v\n", node.Name)
-		pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(func(pod *v1.Pod) bool {
-			return podEvictor.IsEvictable(pod) &&
!nodeutil.PodFitsCurrentNode(pod, node) && - nodeutil.PodFitsAnyNode(pod, nodes) - })) + pods, err := podutil.ListPodsOnANode( + ctx, + client, + node, + podutil.WithFilter(func(pod *v1.Pod) bool { + return podEvictor.IsEvictable(pod) && + !nodeutil.PodFitsCurrentNode(pod, node) && + nodeutil.PodFitsAnyNode(pod, nodes) + }), + podutil.WithNamespaces(strategy.Params.Namespaces.Include), + podutil.WithoutNamespaces(strategy.Params.Namespaces.Exclude), + ) if err != nil { klog.Errorf("failed to get pods from %v: %v", node.Name, err) } diff --git a/pkg/descheduler/strategies/node_taint.go b/pkg/descheduler/strategies/node_taint.go index bc612890d..529b6b6b3 100644 --- a/pkg/descheduler/strategies/node_taint.go +++ b/pkg/descheduler/strategies/node_taint.go @@ -18,6 +18,7 @@ package strategies import ( "context" + "fmt" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" @@ -29,11 +30,40 @@ import ( "k8s.io/klog/v2" ) +func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters) error { + if params == nil { + return nil + } + + // At most one of include/exclude can be set + if len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 { + return fmt.Errorf("only one of Include/Exclude namespaces can be set") + } + + return nil +} + // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { + if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil { + klog.V(1).Info(err) + return + } + var namespaces api.Namespaces + if strategy.Params != nil { + namespaces = strategy.Params.Namespaces + } + for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) - pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable)) + pods, err := podutil.ListPodsOnANode( + ctx, + client, + node, + podutil.WithFilter(podEvictor.IsEvictable), + podutil.WithNamespaces(namespaces.Include), + podutil.WithoutNamespaces(namespaces.Exclude), + ) if err != nil { //no pods evicted as error encountered retrieving evictable Pods return diff --git a/pkg/descheduler/strategies/pod_antiaffinity.go b/pkg/descheduler/strategies/pod_antiaffinity.go index 943aa8fd8..66667e3fc 100644 --- a/pkg/descheduler/strategies/pod_antiaffinity.go +++ b/pkg/descheduler/strategies/pod_antiaffinity.go @@ -18,6 +18,7 @@ package strategies import ( "context" + "fmt" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" @@ -30,11 +31,36 @@ import ( "k8s.io/klog/v2" ) +func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyParameters) error { + if params == nil { + return nil + } + + // At most one of include/exclude can be set + if len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 { + return fmt.Errorf("only one of Include/Exclude namespaces can be set") + } + + return nil +} + // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules. 
func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { + var namespaces api.Namespaces + if strategy.Params != nil { + namespaces = strategy.Params.Namespaces + } + for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) - pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable)) + pods, err := podutil.ListPodsOnANode( + ctx, + client, + node, + podutil.WithFilter(podEvictor.IsEvictable), + podutil.WithNamespaces(namespaces.Include), + podutil.WithoutNamespaces(namespaces.Exclude), + ) if err != nil { return } diff --git a/pkg/descheduler/strategies/pod_lifetime.go b/pkg/descheduler/strategies/pod_lifetime.go index ae6c0208c..a214dc3cf 100644 --- a/pkg/descheduler/strategies/pod_lifetime.go +++ b/pkg/descheduler/strategies/pod_lifetime.go @@ -18,6 +18,7 @@ package strategies import ( "context" + "fmt" v1 "k8s.io/api/core/v1" v1meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,16 +30,30 @@ import ( podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" ) +func validatePodLifeTimeParams(params *api.StrategyParameters) error { + if params == nil || params.MaxPodLifeTimeSeconds == nil { + return fmt.Errorf("MaxPodLifeTimeSeconds not set") + } + + // At most one of include/exclude can be set + if len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 { + return fmt.Errorf("only one of Include/Exclude namespaces can be set") + } + + return nil +} + // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago. func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { - if strategy.Params == nil || strategy.Params.MaxPodLifeTimeSeconds == nil { - klog.V(1).Infof("MaxPodLifeTimeSeconds not set") + if err := validatePodLifeTimeParams(strategy.Params); err != nil { + klog.V(1).Info(err) return } for _, node := range nodes { klog.V(1).Infof("Processing node: %#v", node.Name) - pods := listOldPodsOnNode(ctx, client, node, *strategy.Params.MaxPodLifeTimeSeconds, podEvictor) + + pods := listOldPodsOnNode(ctx, client, node, strategy.Params, podEvictor) for _, pod := range pods { success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime") if success { @@ -50,11 +65,19 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D break } } + } } -func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, maxAge uint, evictor *evictions.PodEvictor) []*v1.Pod { - pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(evictor.IsEvictable)) +func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, params *api.StrategyParameters, evictor *evictions.PodEvictor) []*v1.Pod { + pods, err := podutil.ListPodsOnANode( + ctx, + client, + node, + podutil.WithFilter(evictor.IsEvictable), + podutil.WithNamespaces(params.Namespaces.Include), + podutil.WithoutNamespaces(params.Namespaces.Exclude), + ) if err != nil { return nil } @@ -62,7 +85,7 @@ func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1 var oldPods []*v1.Pod for _, pod := range pods { podAgeSeconds := uint(v1meta.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds()) - if podAgeSeconds > maxAge { + if podAgeSeconds > *params.MaxPodLifeTimeSeconds { oldPods = 
append(oldPods, pod) } } diff --git a/pkg/descheduler/strategies/toomanyrestarts.go b/pkg/descheduler/strategies/toomanyrestarts.go index e0750c14f..ae745b45f 100644 --- a/pkg/descheduler/strategies/toomanyrestarts.go +++ b/pkg/descheduler/strategies/toomanyrestarts.go @@ -18,6 +18,7 @@ package strategies import ( "context" + "fmt" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" @@ -28,17 +29,37 @@ import ( podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" ) +func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error { + if params == nil || params.PodsHavingTooManyRestarts == nil || params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 { + return fmt.Errorf("PodsHavingTooManyRestarts threshold not set") + } + + // At most one of include/exclude can be set + if len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 { + return fmt.Errorf("only one of Include/Exclude namespaces can be set") + } + + return nil +} + // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node. // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings. // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages. func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { - if strategy.Params == nil || strategy.Params.PodsHavingTooManyRestarts == nil || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 { - klog.V(1).Infof("PodsHavingTooManyRestarts thresholds not set") + if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil { + klog.V(1).Info(err) return } for _, node := range nodes { klog.V(1).Infof("Processing node: %s", node.Name) - pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable)) + pods, err := podutil.ListPodsOnANode( + ctx, + client, + node, + podutil.WithFilter(podEvictor.IsEvictable), + podutil.WithNamespaces(strategy.Params.Namespaces.Include), + podutil.WithoutNamespaces(strategy.Params.Namespaces.Exclude), + ) if err != nil { klog.Errorf("Error when list pods at node %s", node.Name) continue From c40a9c397f1e8b182c67c7abf298db6c72fc949f Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Mon, 13 Jul 2020 16:12:47 +0200 Subject: [PATCH 4/5] e2e: add test for included/excluded namespace through PodLifeTime strategy --- test/e2e/e2e_test.go | 219 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 202 insertions(+), 17 deletions(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index cdd85b8e1..2449298f0 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -20,6 +20,7 @@ import ( "context" "math" "os" + "sort" "strings" "testing" "time" @@ -27,6 +28,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" coreinformers "k8s.io/client-go/informers/core/v1" @@ -134,9 +136,7 @@ func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset time.Sleep(10 * time.Second) } -func TestLowNodeUtilization(t *testing.T) { - ctx := context.Background() - +func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, chan struct{}) { clientSet, err := 
client.CreateClient(os.Getenv("KUBECONFIG")) if err != nil { t.Errorf("Error during client creation with %v", err) @@ -147,10 +147,18 @@ func TestLowNodeUtilization(t *testing.T) { sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0) sharedInformerFactory.Start(stopChannel) sharedInformerFactory.WaitForCacheSync(stopChannel) - defer close(stopChannel) nodeInformer := sharedInformerFactory.Core().V1().Nodes() + return clientSet, nodeInformer, stopChannel +} + +func TestLowNodeUtilization(t *testing.T) { + ctx := context.Background() + + clientSet, nodeInformer, stopCh := initializeClient(t) + defer close(stopCh) + nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { t.Errorf("Error listing node with %v", err) @@ -177,22 +185,199 @@ func TestLowNodeUtilization(t *testing.T) { deleteRC(ctx, t, clientSet, rc) } +func runPodLifetimeStrategy(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer, namespaces deschedulerapi.Namespaces) { + // Run descheduler. + evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset) + if err != nil || len(evictionPolicyGroupVersion) == 0 { + klog.Fatalf("%v", err) + } + + nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", nil) + if err != nil { + klog.Fatalf("%v", err) + } + + maxPodLifeTimeSeconds := uint(1) + strategies.PodLifeTime( + ctx, + clientset, + deschedulerapi.DeschedulerStrategy{ + Enabled: true, + Params: &deschedulerapi.StrategyParameters{ + MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds, + Namespaces: namespaces, + }, + }, + nodes, + evictions.NewPodEvictor( + clientset, + evictionPolicyGroupVersion, + false, + 0, + nodes, + false, + ), + ) +} + +func getPodNames(pods []v1.Pod) []string { + names := []string{} + for _, pod := range pods { + names = append(names, pod.Name) + } + return names +} + +func intersectStrings(lista, listb []string) []string { + commonNames := []string{} + + for _, stra := range lista { + for _, strb := range listb { + if stra == strb { + commonNames = append(commonNames, stra) + break + } + } + } + + return commonNames +} + +// TODO(jchaloup): add testcases for two included/excluded namespaces + +func TestNamespaceConstraintsInclude(t *testing.T) { + ctx := context.Background() + + clientSet, nodeInformer, stopCh := initializeClient(t) + defer close(stopCh) + + testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}} + if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil { + t.Fatalf("Unable to create ns %v", testNamespace.Name) + } + defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{}) + + rc := RcByNameContainer("test-rc-podlifetime", testNamespace.Name, 5, map[string]string{"test": "podlifetime-include"}, nil) + if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil { + t.Errorf("Error creating deployment %v", err) + } + defer deleteRC(ctx, t, clientSet, rc) + + // wait for a while so all the pods are at least few seconds older + time.Sleep(5 * time.Second) + + // it's assumed all new pods are named differently from currently running -> no name collision + podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()}) + if err != nil { + t.Fatalf("Unable to list pods: %v", err) + } + + if 
len(podList.Items) != 5 { + t.Fatalf("Expected 5 replicas, got %v instead", len(podList.Items)) + } + + initialPodNames := getPodNames(podList.Items) + sort.Strings(initialPodNames) + t.Logf("Existing pods: %v", initialPodNames) + + t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace) + runPodLifetimeStrategy(ctx, clientSet, nodeInformer, deschedulerapi.Namespaces{ + Include: []string{rc.Namespace}, + }) + + // All pods are supposed to be deleted, wait until all the old pods are deleted + if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) { + podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()}) + if err != nil { + return false, nil + } + + includePodNames := getPodNames(podList.Items) + // validate all pod were deleted + if len(intersectStrings(initialPodNames, includePodNames)) > 0 { + t.Logf("Waiting until %v pods get deleted", intersectStrings(initialPodNames, includePodNames)) + // check if there's at least one pod not in Terminating state + for _, pod := range podList.Items { + // In case podList contains newly created pods + if len(intersectStrings(initialPodNames, []string{pod.Name})) == 0 { + continue + } + if pod.DeletionTimestamp == nil { + t.Logf("Pod %v not in terminating state", pod.Name) + return false, nil + } + } + t.Logf("All %v pods are terminating", intersectStrings(initialPodNames, includePodNames)) + } + + return true, nil + }); err != nil { + t.Fatalf("Error waiting for pods to be deleted: %v", err) + } +} + +func TestNamespaceConstraintsExclude(t *testing.T) { + ctx := context.Background() + + clientSet, nodeInformer, stopCh := initializeClient(t) + defer close(stopCh) + + testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}} + if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil { + t.Fatalf("Unable to create ns %v", testNamespace.Name) + } + defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{}) + + rc := RcByNameContainer("test-rc-podlifetime", testNamespace.Name, 5, map[string]string{"test": "podlifetime-exclude"}, nil) + if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil { + t.Errorf("Error creating deployment %v", err) + } + defer deleteRC(ctx, t, clientSet, rc) + + // wait for a while so all the pods are at least few seconds older + time.Sleep(5 * time.Second) + + // it's assumed all new pods are named differently from currently running -> no name collision + podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()}) + if err != nil { + t.Fatalf("Unable to list pods: %v", err) + } + + if len(podList.Items) != 5 { + t.Fatalf("Expected 5 replicas, got %v instead", len(podList.Items)) + } + + initialPodNames := getPodNames(podList.Items) + sort.Strings(initialPodNames) + t.Logf("Existing pods: %v", initialPodNames) + + t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace) + runPodLifetimeStrategy(ctx, clientSet, nodeInformer, deschedulerapi.Namespaces{ + Exclude: []string{rc.Namespace}, + }) + + t.Logf("Waiting 10s") + time.Sleep(10 * time.Second) + podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: 
labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
+	if err != nil {
+		t.Fatalf("Unable to list pods after running strategy: %v", err)
+	}
+
+	excludePodNames := getPodNames(podList.Items)
+	sort.Strings(excludePodNames)
+	t.Logf("Existing pods: %v", excludePodNames)
+
+	// validate no pods were deleted
+	if len(intersectStrings(initialPodNames, excludePodNames)) != 5 {
+		t.Fatalf("None of %v pods are expected to be deleted", initialPodNames)
+	}
+}
+
 func TestEvictAnnotation(t *testing.T) {
 	ctx := context.Background()
-	clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
-	if err != nil {
-		t.Errorf("Error during client creation with %v", err)
-	}
-
-	stopChannel := make(chan struct{}, 0)
-
-	sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
-	sharedInformerFactory.Start(stopChannel)
-	sharedInformerFactory.WaitForCacheSync(stopChannel)
-	defer close(stopChannel)
-
-	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
+	clientSet, nodeInformer, stopCh := initializeClient(t)
+	defer close(stopCh)
 	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 	if err != nil {

From 42db31683fb43f73f87c7d2a6a3bf06329a7b0dc Mon Sep 17 00:00:00 2001
From: Jan Chaloupka
Date: Wed, 22 Jul 2020 15:49:27 +0200
Subject: [PATCH 5/5] README: describe usage of the namespace filtering

---
 README.md | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/README.md b/README.md
index edbbaa04b..6519e6466 100644
--- a/README.md
+++ b/README.md
@@ -228,6 +228,48 @@ strategies:
       maxPodLifeTimeSeconds: 86400
 ````
+## Namespace filtering
+
+Strategies like `PodLifeTime`, `RemovePodsHavingTooManyRestarts`, `RemovePodsViolatingNodeTaints`,
+`RemovePodsViolatingNodeAffinity` and `RemovePodsViolatingInterPodAntiAffinity` can take a `namespaces`
+parameter which allows you to specify a list of included or excluded namespaces.
+For example:
+
+```
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "PodLifeTime":
+    enabled: true
+    params:
+      maxPodLifeTimeSeconds: 86400
+      namespaces:
+        include:
+        - "namespace1"
+        - "namespace2"
+```
+
+In the example above, `PodLifeTime` is executed only over `namespace1` and `namespace2`.
+The same applies to the `exclude` field:
+
+```
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "PodLifeTime":
+    enabled: true
+    params:
+      maxPodLifeTimeSeconds: 86400
+      namespaces:
+        exclude:
+        - "namespace1"
+        - "namespace2"
+```
+
+Here the strategy is executed over all namespaces except `namespace1` and `namespace2`.
+
+It is not allowed to combine the `include` and `exclude` fields.
+
 ## Pod Evictions

When the descheduler decides to evict pods from a node, it employs the following general mechanism: