Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00
Merge pull request #338 from ingvagabund/filter-out-pods-by-namespaces
Filter pods by namespaces
@@ -234,6 +234,48 @@ strategies:
       maxPodLifeTimeSeconds: 86400
```

## Namespace filtering

Strategies like `PodLifeTime`, `RemovePodsHavingTooManyRestarts`, `RemovePodsViolatingNodeTaints`,
`RemovePodsViolatingNodeAffinity` and `RemovePodsViolatingInterPodAntiAffinity` accept a `namespaces`
parameter, which allows specifying a list of namespaces to include or exclude.
E.g.

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 86400
      namespaces:
        include:
        - "namespace1"
        - "namespace2"
```

In this example `PodLifeTime` is executed only over `namespace1` and `namespace2`.
The same holds for the `exclude` field:

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 86400
      namespaces:
        exclude:
        - "namespace1"
        - "namespace2"
```

Here the strategy is executed over all namespaces except `namespace1` and `namespace2`.

It is not allowed to combine the `include` and `exclude` fields.

## Pod Evictions

When the descheduler decides to evict pods from a node, it employs the following general mechanism:
@@ -44,13 +44,23 @@ type DeschedulerStrategy struct {
	Params *StrategyParameters
}

// Only one of its members may be specified
// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable
type Namespaces struct {
	Include []string
	Exclude []string
}

// Besides Namespaces only one of its members may be specified
// TODO(jchaloup): move Namespaces to individual strategies once the policy
// version is bumped to v1alpha2
type StrategyParameters struct {
	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds
	NodeAffinityType                  []string
	PodsHavingTooManyRestarts         *PodsHavingTooManyRestarts
	MaxPodLifeTimeSeconds             *uint
	RemoveDuplicates                  *RemoveDuplicates
	Namespaces                        Namespaces
}

type Percentage float64
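For orientation, the new `Namespaces` field maps directly onto the YAML shown in the README hunk above. A minimal sketch of building the equivalent strategy programmatically (illustrative only, not part of this change; it assumes only the `api` types added here and the `Enabled` field already present on `DeschedulerStrategy`):

```
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
)

func main() {
	maxPodLifeTimeSeconds := uint(86400)

	// Equivalent of the README's PodLifeTime example with an include list.
	strategy := api.DeschedulerStrategy{
		Enabled: true,
		Params: &api.StrategyParameters{
			MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds,
			Namespaces: api.Namespaces{
				Include: []string{"namespace1", "namespace2"},
			},
		},
	}
	fmt.Printf("%+v\n", strategy)
}
```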
@@ -44,13 +44,21 @@ type DeschedulerStrategy struct {
	Params *StrategyParameters `json:"params,omitempty"`
}

// Only one of its members may be specified
// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable.
type Namespaces struct {
	Include []string `json:"include"`
	Exclude []string `json:"exclude"`
}

// Besides Namespaces only one of its members may be specified
type StrategyParameters struct {
	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
	NodeAffinityType                  []string                           `json:"nodeAffinityType,omitempty"`
	PodsHavingTooManyRestarts         *PodsHavingTooManyRestarts         `json:"podsHavingTooManyRestarts,omitempty"`
	MaxPodLifeTimeSeconds             *uint                              `json:"maxPodLifeTimeSeconds,omitempty"`
	RemoveDuplicates                  *RemoveDuplicates                  `json:"removeDuplicates,omitempty"`
	Namespaces                        Namespaces                         `json:"namespaces"`
}

type Percentage float64
@@ -55,6 +55,16 @@ func RegisterConversions(s *runtime.Scheme) error {
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*api.Namespaces)(nil), (*Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_Namespaces_To_v1alpha1_Namespaces(a.(*api.Namespaces), b.(*Namespaces), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*NodeResourceUtilizationThresholds)(nil), (*api.NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(a.(*NodeResourceUtilizationThresholds), b.(*api.NodeResourceUtilizationThresholds), scope)
	}); err != nil {

@@ -142,6 +152,28 @@ func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.Des
	return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
}

func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
	out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
	out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
	return nil
}

// Convert_v1alpha1_Namespaces_To_api_Namespaces is an autogenerated conversion function.
func Convert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
	return autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in, out, s)
}

func autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
	out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
	out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
	return nil
}

// Convert_api_Namespaces_To_v1alpha1_Namespaces is an autogenerated conversion function.
func Convert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
	return autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in, out, s)
}

func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
	out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
	out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))

@@ -214,6 +246,9 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
	out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
	out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
	if err := Convert_v1alpha1_Namespaces_To_api_Namespaces(&in.Namespaces, &out.Namespaces, s); err != nil {
		return err
	}
	return nil
}

@@ -228,6 +263,9 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
	out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
	out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
	if err := Convert_api_Namespaces_To_v1alpha1_Namespaces(&in.Namespaces, &out.Namespaces, s); err != nil {
		return err
	}
	return nil
}
@@ -77,6 +77,32 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
	*out = *in
	if in.Include != nil {
		in, out := &in.Include, &out.Include
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Exclude != nil {
		in, out := &in.Exclude, &out.Exclude
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
func (in *Namespaces) DeepCopy() *Namespaces {
	if in == nil {
		return nil
	}
	out := new(Namespaces)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
	*out = *in

@@ -216,6 +242,7 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
		*out = new(RemoveDuplicates)
		(*in).DeepCopyInto(*out)
	}
	in.Namespaces.DeepCopyInto(&out.Namespaces)
	return
}
@@ -77,6 +77,32 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
	*out = *in
	if in.Include != nil {
		in, out := &in.Include, &out.Include
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Exclude != nil {
		in, out := &in.Exclude, &out.Exclude
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
func (in *Namespaces) DeepCopy() *Namespaces {
	if in == nil {
		return nil
	}
	out := new(Namespaces)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
	*out = *in

@@ -216,6 +242,7 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
		*out = new(RemoveDuplicates)
		(*in).DeepCopyInto(*out)
	}
	in.Namespaces.DeepCopyInto(&out.Namespaces)
	return
}
@@ -18,34 +18,106 @@ package pod

import (
	"context"
	"sort"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	clientset "k8s.io/client-go/kubernetes"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sort"
)

type Options struct {
	filter             func(pod *v1.Pod) bool
	includedNamespaces []string
	excludedNamespaces []string
}

// WithFilter sets a pod filter.
// The filter function should return true if the pod should be returned from ListPodsOnANode
func WithFilter(filter func(pod *v1.Pod) bool) func(opts *Options) {
	return func(opts *Options) {
		opts.filter = filter
	}
}

// WithNamespaces sets included namespaces
func WithNamespaces(namespaces []string) func(opts *Options) {
	return func(opts *Options) {
		opts.includedNamespaces = namespaces
	}
}

// WithoutNamespaces sets excluded namespaces
func WithoutNamespaces(namespaces []string) func(opts *Options) {
	return func(opts *Options) {
		opts.excludedNamespaces = namespaces
	}
}

// ListPodsOnANode lists all of the pods on a node
// It also accepts an optional "filter" function which can be used to further limit the pods that are returned.
// (Usually this is podEvictor.IsEvictable, in order to only list the evictable pods on a node, but can
// be used by strategies to extend IsEvictable if there are further restrictions, such as with NodeAffinity).
// The filter function should return true if the pod should be returned from ListPodsOnANode
func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, filter func(pod *v1.Pod) bool) ([]*v1.Pod, error) {
	fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
func ListPodsOnANode(
	ctx context.Context,
	client clientset.Interface,
	node *v1.Node,
	opts ...func(opts *Options),
) ([]*v1.Pod, error) {
	options := &Options{}
	for _, opt := range opts {
		opt(options)
	}

	pods := make([]*v1.Pod, 0)

	fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)

	if len(options.includedNamespaces) > 0 {
		fieldSelector, err := fields.ParseSelector(fieldSelectorString)
		if err != nil {
			return []*v1.Pod{}, err
		}

		for _, namespace := range options.includedNamespaces {
			podList, err := client.CoreV1().Pods(namespace).List(ctx,
				metav1.ListOptions{FieldSelector: fieldSelector.String()})
			if err != nil {
				return []*v1.Pod{}, err
			}
			for i := range podList.Items {
				if options.filter != nil && !options.filter(&podList.Items[i]) {
					continue
				}
				pods = append(pods, &podList.Items[i])
			}
		}
		return pods, nil
	}

	if len(options.excludedNamespaces) > 0 {
		for _, namespace := range options.excludedNamespaces {
			fieldSelectorString += ",metadata.namespace!=" + namespace
		}
	}

	fieldSelector, err := fields.ParseSelector(fieldSelectorString)
	if err != nil {
		return []*v1.Pod{}, err
	}
	// INFO(jchaloup): field selectors do not work properly with listers.
	// Once the descheduler switches to pod listers (through informers),
	// we need to flip to client-side filtering.
	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
		metav1.ListOptions{FieldSelector: fieldSelector.String()})
	if err != nil {
		return []*v1.Pod{}, err
	}

	pods := make([]*v1.Pod, 0)
	for i := range podList.Items {
		if filter != nil && !filter(&podList.Items[i]) {
		if options.filter != nil && !options.filter(&podList.Items[i]) {
			continue
		}
		pods = append(pods, &podList.Items[i])
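For reference, a short sketch of how callers are meant to combine the new functional options (the helper name, node and namespace values are illustrative; the calls mirror the strategy changes further down in this diff). With `WithNamespaces`, pods are listed per included namespace; with `WithoutNamespaces`, each exclusion is appended to the field selector (e.g. `spec.nodeName=<node>,status.phase!=Succeeded,status.phase!=Failed,metadata.namespace!=kube-system`) and a single cluster-wide list is made:

```
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// listEvictionCandidates is an illustrative helper, not part of this PR.
// It exercises both namespace modes of the new options API.
func listEvictionCandidates(ctx context.Context, client clientset.Interface, node *v1.Node, isEvictable func(*v1.Pod) bool) ([]*v1.Pod, []*v1.Pod, error) {
	// Only pods in namespace1/namespace2 (one List call per included namespace).
	included, err := podutil.ListPodsOnANode(
		ctx,
		client,
		node,
		podutil.WithFilter(isEvictable),
		podutil.WithNamespaces([]string{"namespace1", "namespace2"}),
	)
	if err != nil {
		return nil, nil, err
	}

	// All namespaces except kube-system; the exclusion becomes part of the field selector.
	excluded, err := podutil.ListPodsOnANode(
		ctx,
		client,
		node,
		podutil.WithFilter(isEvictable),
		podutil.WithoutNamespaces([]string{"kube-system"}),
	)
	if err != nil {
		return nil, nil, err
	}
	return included, excluded, nil
}
```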
@@ -67,7 +67,7 @@ func TestListPodsOnANode(t *testing.T) {
		}
		return true, nil, fmt.Errorf("Failed to list: %v", list)
	})
	pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node, nil)
	pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node)
	if len(pods) != testCase.expectedPodCount {
		t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
	}
@@ -46,7 +46,7 @@ func RemoveDuplicatePods(
) {
	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %#v", node.Name)
		pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
		pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable))
		if err != nil {
			klog.Errorf("error listing evictable pods on node %s: %+v", node.Name, err)
			continue
@@ -326,7 +326,7 @@ func sortNodesByUsage(nodes []NodeUsageMap) {
func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {
	npm := NodePodsMap{}
	for _, node := range nodes {
		pods, err := podutil.ListPodsOnANode(ctx, client, node, nil)
		pods, err := podutil.ListPodsOnANode(ctx, client, node)
		if err != nil {
			klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
		} else {
@@ -18,6 +18,7 @@ package strategies

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

@@ -29,10 +30,22 @@ import (
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
	if params == nil || len(params.NodeAffinityType) == 0 {
		return fmt.Errorf("NodeAffinityType is empty")
	}
	// At most one of include/exclude can be set
	if len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
	}

	return nil
}

// RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	if strategy.Params == nil {
		klog.V(1).Infof("NodeAffinityType not set")
	if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
		klog.V(1).Info(err)
		return
	}
	for _, nodeAffinity := range strategy.Params.NodeAffinityType {

@@ -43,11 +56,18 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
		for _, node := range nodes {
			klog.V(1).Infof("Processing node: %#v\n", node.Name)

			pods, err := podutil.ListPodsOnANode(ctx, client, node, func(pod *v1.Pod) bool {
				return podEvictor.IsEvictable(pod) &&
					!nodeutil.PodFitsCurrentNode(pod, node) &&
					nodeutil.PodFitsAnyNode(pod, nodes)
			})
			pods, err := podutil.ListPodsOnANode(
				ctx,
				client,
				node,
				podutil.WithFilter(func(pod *v1.Pod) bool {
					return podEvictor.IsEvictable(pod) &&
						!nodeutil.PodFitsCurrentNode(pod, node) &&
						nodeutil.PodFitsAnyNode(pod, nodes)
				}),
				podutil.WithNamespaces(strategy.Params.Namespaces.Include),
				podutil.WithoutNamespaces(strategy.Params.Namespaces.Exclude),
			)
			if err != nil {
				klog.Errorf("failed to get pods from %v: %v", node.Name, err)
			}
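The same include/exclude mutual-exclusion check recurs in each strategy's validation helper below. A minimal table-driven test sketch for the helper above (the test name, cases and the `requiredDuringSchedulingIgnoredDuringExecution` value are illustrative, not part of this PR):

```
package strategies

import (
	"testing"

	"sigs.k8s.io/descheduler/pkg/api"
)

func TestValidatePodsViolatingNodeAffinityParamsNamespaces(t *testing.T) {
	affinity := []string{"requiredDuringSchedulingIgnoredDuringExecution"}
	tests := []struct {
		name    string
		params  *api.StrategyParameters
		wantErr bool
	}{
		{name: "nil params", params: nil, wantErr: true},
		{name: "include only", params: &api.StrategyParameters{NodeAffinityType: affinity, Namespaces: api.Namespaces{Include: []string{"ns1"}}}, wantErr: false},
		{name: "include and exclude", params: &api.StrategyParameters{NodeAffinityType: affinity, Namespaces: api.Namespaces{Include: []string{"ns1"}, Exclude: []string{"ns2"}}}, wantErr: true},
	}
	for _, tc := range tests {
		// The helper must reject configurations that set both lists.
		if err := validatePodsViolatingNodeAffinityParams(tc.params); (err != nil) != tc.wantErr {
			t.Errorf("%s: unexpected result, err = %v", tc.name, err)
		}
	}
}
```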
@@ -18,6 +18,7 @@ package strategies

import (
	"context"
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"

@@ -29,11 +30,40 @@ import (
	"k8s.io/klog/v2"
)

func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters) error {
	if params == nil {
		return nil
	}

	// At most one of include/exclude can be set
	if len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
	}

	return nil
}

// RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
		klog.V(1).Info(err)
		return
	}
	var namespaces api.Namespaces
	if strategy.Params != nil {
		namespaces = strategy.Params.Namespaces
	}

	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %#v\n", node.Name)
		pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
		pods, err := podutil.ListPodsOnANode(
			ctx,
			client,
			node,
			podutil.WithFilter(podEvictor.IsEvictable),
			podutil.WithNamespaces(namespaces.Include),
			podutil.WithoutNamespaces(namespaces.Exclude),
		)
		if err != nil {
			// no pods evicted as error encountered retrieving evictable Pods
			return
@@ -18,6 +18,8 @@ package strategies

import (
	"context"
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"

@@ -29,11 +31,36 @@ import (
	"k8s.io/klog/v2"
)

func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyParameters) error {
	if params == nil {
		return nil
	}

	// At most one of include/exclude can be set
	if len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
	}

	return nil
}

// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which violate inter-pod anti-affinity rules.
func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	var namespaces api.Namespaces
	if strategy.Params != nil {
		namespaces = strategy.Params.Namespaces
	}

	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %#v\n", node.Name)
		pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
		pods, err := podutil.ListPodsOnANode(
			ctx,
			client,
			node,
			podutil.WithFilter(podEvictor.IsEvictable),
			podutil.WithNamespaces(namespaces.Include),
			podutil.WithoutNamespaces(namespaces.Exclude),
		)
		if err != nil {
			return
		}
@@ -18,6 +18,7 @@ package strategies

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -29,16 +30,30 @@ import (
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func validatePodLifeTimeParams(params *api.StrategyParameters) error {
	if params == nil || params.MaxPodLifeTimeSeconds == nil {
		return fmt.Errorf("MaxPodLifeTimeSeconds not set")
	}

	// At most one of include/exclude can be set
	if len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
	}

	return nil
}

// PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	if strategy.Params == nil || strategy.Params.MaxPodLifeTimeSeconds == nil {
		klog.V(1).Infof("MaxPodLifeTimeSeconds not set")
	if err := validatePodLifeTimeParams(strategy.Params); err != nil {
		klog.V(1).Info(err)
		return
	}

	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %#v", node.Name)
		pods := listOldPodsOnNode(ctx, client, node, *strategy.Params.MaxPodLifeTimeSeconds, podEvictor)

		pods := listOldPodsOnNode(ctx, client, node, strategy.Params, podEvictor)
		for _, pod := range pods {
			success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
			if success {

@@ -50,11 +65,19 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
			break
		}
	}

	}
}

func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, maxAge uint, evictor *evictions.PodEvictor) []*v1.Pod {
	pods, err := podutil.ListPodsOnANode(ctx, client, node, evictor.IsEvictable)
func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, params *api.StrategyParameters, evictor *evictions.PodEvictor) []*v1.Pod {
	pods, err := podutil.ListPodsOnANode(
		ctx,
		client,
		node,
		podutil.WithFilter(evictor.IsEvictable),
		podutil.WithNamespaces(params.Namespaces.Include),
		podutil.WithoutNamespaces(params.Namespaces.Exclude),
	)
	if err != nil {
		return nil
	}

@@ -62,7 +85,7 @@ func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1
	var oldPods []*v1.Pod
	for _, pod := range pods {
		podAgeSeconds := uint(v1meta.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
		if podAgeSeconds > maxAge {
		if podAgeSeconds > *params.MaxPodLifeTimeSeconds {
			oldPods = append(oldPods, pod)
		}
	}
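As a standalone illustration of the age check above: a pod created 25 hours ago exceeds the README's `maxPodLifeTimeSeconds: 86400` (24 hours) and becomes an eviction candidate. The values below are made up for the example:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	maxPodLifeTimeSeconds := uint(86400) // 24h, as in the README example
	creationTimestamp := time.Now().Add(-25 * time.Hour)

	podAgeSeconds := uint(time.Since(creationTimestamp).Seconds())
	fmt.Println(podAgeSeconds > maxPodLifeTimeSeconds) // true -> pod is older than the limit
}
```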
@@ -18,6 +18,7 @@ package strategies

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

@@ -28,17 +29,37 @@ import (
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
	if params == nil || params.PodsHavingTooManyRestarts == nil || params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
		return fmt.Errorf("PodsHavingTooManyRestarts threshold not set")
	}

	// At most one of include/exclude can be set
	if len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
	}

	return nil
}

// RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
// There are many cases that can lead to this issue, e.g. a failed volume mount or an app error caused by nodes' different settings.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	if strategy.Params == nil || strategy.Params.PodsHavingTooManyRestarts == nil || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
		klog.V(1).Infof("PodsHavingTooManyRestarts thresholds not set")
	if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
		klog.V(1).Info(err)
		return
	}
	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %s", node.Name)
		pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
		pods, err := podutil.ListPodsOnANode(
			ctx,
			client,
			node,
			podutil.WithFilter(podEvictor.IsEvictable),
			podutil.WithNamespaces(strategy.Params.Namespaces.Include),
			podutil.WithoutNamespaces(strategy.Params.Namespaces.Exclude),
		)
		if err != nil {
			klog.Errorf("Error when list pods at node %s", node.Name)
			continue
@@ -20,6 +20,7 @@ import (
	"context"
	"math"
	"os"
	"sort"
	"strings"
	"testing"
	"time"

@@ -27,6 +28,7 @@ import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	coreinformers "k8s.io/client-go/informers/core/v1"
@@ -134,9 +136,7 @@ func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset
	time.Sleep(10 * time.Second)
}

func TestLowNodeUtilization(t *testing.T) {
	ctx := context.Background()

func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, chan struct{}) {
	clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
	if err != nil {
		t.Errorf("Error during client creation with %v", err)

@@ -147,10 +147,18 @@ func TestLowNodeUtilization(t *testing.T) {
	sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
	sharedInformerFactory.Start(stopChannel)
	sharedInformerFactory.WaitForCacheSync(stopChannel)
	defer close(stopChannel)

	nodeInformer := sharedInformerFactory.Core().V1().Nodes()

	return clientSet, nodeInformer, stopChannel
}

func TestLowNodeUtilization(t *testing.T) {
	ctx := context.Background()

	clientSet, nodeInformer, stopCh := initializeClient(t)
	defer close(stopCh)

	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		t.Errorf("Error listing node with %v", err)
@@ -177,22 +185,199 @@ func TestLowNodeUtilization(t *testing.T) {
		deleteRC(ctx, t, clientSet, rc)
	}

func runPodLifetimeStrategy(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer, namespaces deschedulerapi.Namespaces) {
	// Run descheduler.
	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
	if err != nil || len(evictionPolicyGroupVersion) == 0 {
		klog.Fatalf("%v", err)
	}

	nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", nil)
	if err != nil {
		klog.Fatalf("%v", err)
	}

	maxPodLifeTimeSeconds := uint(1)
	strategies.PodLifeTime(
		ctx,
		clientset,
		deschedulerapi.DeschedulerStrategy{
			Enabled: true,
			Params: &deschedulerapi.StrategyParameters{
				MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds,
				Namespaces:            namespaces,
			},
		},
		nodes,
		evictions.NewPodEvictor(
			clientset,
			evictionPolicyGroupVersion,
			false,
			0,
			nodes,
			false,
		),
	)
}

func getPodNames(pods []v1.Pod) []string {
	names := []string{}
	for _, pod := range pods {
		names = append(names, pod.Name)
	}
	return names
}

func intersectStrings(lista, listb []string) []string {
	commonNames := []string{}

	for _, stra := range lista {
		for _, strb := range listb {
			if stra == strb {
				commonNames = append(commonNames, stra)
				break
			}
		}
	}

	return commonNames
}

// TODO(jchaloup): add testcases for two included/excluded namespaces

func TestNamespaceConstraintsInclude(t *testing.T) {
	ctx := context.Background()

	clientSet, nodeInformer, stopCh := initializeClient(t)
	defer close(stopCh)

	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Unable to create ns %v", testNamespace.Name)
	}
	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})

	rc := RcByNameContainer("test-rc-podlifetime", testNamespace.Name, 5, map[string]string{"test": "podlifetime-include"}, nil)
	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
		t.Errorf("Error creating deployment %v", err)
	}
	defer deleteRC(ctx, t, clientSet, rc)

	// wait for a while so all the pods are at least a few seconds old
	time.Sleep(5 * time.Second)

	// it's assumed all new pods are named differently from the currently running ones -> no name collision
	podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
	if err != nil {
		t.Fatalf("Unable to list pods: %v", err)
	}

	if len(podList.Items) != 5 {
		t.Fatalf("Expected 5 replicas, got %v instead", len(podList.Items))
	}

	initialPodNames := getPodNames(podList.Items)
	sort.Strings(initialPodNames)
	t.Logf("Existing pods: %v", initialPodNames)

	t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace)
	runPodLifetimeStrategy(ctx, clientSet, nodeInformer, deschedulerapi.Namespaces{
		Include: []string{rc.Namespace},
	})

	// All pods are supposed to be deleted, wait until all the old pods are deleted
	if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
		podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
		if err != nil {
			return false, nil
		}

		includePodNames := getPodNames(podList.Items)
		// validate all pods were deleted
		if len(intersectStrings(initialPodNames, includePodNames)) > 0 {
			t.Logf("Waiting until %v pods get deleted", intersectStrings(initialPodNames, includePodNames))
			// check if there's at least one pod not in Terminating state
			for _, pod := range podList.Items {
				// In case podList contains newly created pods
				if len(intersectStrings(initialPodNames, []string{pod.Name})) == 0 {
					continue
				}
				if pod.DeletionTimestamp == nil {
					t.Logf("Pod %v not in terminating state", pod.Name)
					return false, nil
				}
			}
			t.Logf("All %v pods are terminating", intersectStrings(initialPodNames, includePodNames))
		}

		return true, nil
	}); err != nil {
		t.Fatalf("Error waiting for pods to be deleted: %v", err)
	}
}
func TestNamespaceConstraintsExclude(t *testing.T) {
	ctx := context.Background()

	clientSet, nodeInformer, stopCh := initializeClient(t)
	defer close(stopCh)

	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Unable to create ns %v", testNamespace.Name)
	}
	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})

	rc := RcByNameContainer("test-rc-podlifetime", testNamespace.Name, 5, map[string]string{"test": "podlifetime-exclude"}, nil)
	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
		t.Errorf("Error creating deployment %v", err)
	}
	defer deleteRC(ctx, t, clientSet, rc)

	// wait for a while so all the pods are at least a few seconds old
	time.Sleep(5 * time.Second)

	// it's assumed all new pods are named differently from the currently running ones -> no name collision
	podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
	if err != nil {
		t.Fatalf("Unable to list pods: %v", err)
	}

	if len(podList.Items) != 5 {
		t.Fatalf("Expected 5 replicas, got %v instead", len(podList.Items))
	}

	initialPodNames := getPodNames(podList.Items)
	sort.Strings(initialPodNames)
	t.Logf("Existing pods: %v", initialPodNames)

	t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace)
	runPodLifetimeStrategy(ctx, clientSet, nodeInformer, deschedulerapi.Namespaces{
		Exclude: []string{rc.Namespace},
	})

	t.Logf("Waiting 10s")
	time.Sleep(10 * time.Second)
	podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
	if err != nil {
		t.Fatalf("Unable to list pods after running strategy: %v", err)
	}

	excludePodNames := getPodNames(podList.Items)
	sort.Strings(excludePodNames)
	t.Logf("Existing pods: %v", excludePodNames)

	// validate no pods were deleted
	if len(intersectStrings(initialPodNames, excludePodNames)) != 5 {
		t.Fatalf("None of %v pods are expected to be deleted", initialPodNames)
	}
}

func TestEvictAnnotation(t *testing.T) {
	ctx := context.Background()

	clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
	if err != nil {
		t.Errorf("Error during client creation with %v", err)
	}

	stopChannel := make(chan struct{}, 0)

	sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
	sharedInformerFactory.Start(stopChannel)
	sharedInformerFactory.WaitForCacheSync(stopChannel)
	defer close(stopChannel)

	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
	clientSet, nodeInformer, stopCh := initializeClient(t)
	defer close(stopCh)

	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
@@ -324,7 +509,7 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
			continue
		}
		// List all the pods on the current Node
		podsOnANode, err := podutil.ListPodsOnANode(ctx, clientSet, node, podEvictor.IsEvictable)
		podsOnANode, err := podutil.ListPodsOnANode(ctx, clientSet, node, podutil.WithFilter(podEvictor.IsEvictable))
		if err != nil {
			t.Errorf("Error listing pods on a node %v", err)
		}

@@ -336,7 +521,7 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
	}
	t.Log("Eviction of pods starting")
	startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer, podEvictor)
	podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, leastLoadedNode, podEvictor.IsEvictable)
	podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, leastLoadedNode, podutil.WithFilter(podEvictor.IsEvictable))
	if err != nil {
		t.Errorf("Error listing pods on a node %v", err)
	}