Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)
Merge pull request #510 from lixiang233/Ft_filter_by_label
Filter pods by labelSelector
@@ -43,6 +43,7 @@ Table of Contents
 * [Filter Pods](#filter-pods)
 * [Namespace filtering](#namespace-filtering)
 * [Priority filtering](#priority-filtering)
+* [Label filtering](#label-filtering)
 * [Pod Evictions](#pod-evictions)
 * [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
 * [Compatibility Matrix](#compatibility-matrix)
@@ -239,6 +240,7 @@ node.
 |`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
 |`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
+|`labelSelector`|(see [label filtering](#label-filtering))|

 **Example:**

@@ -276,6 +278,7 @@ podA gets evicted from nodeA.
 |`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
 |`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
+|`labelSelector`|(see [label filtering](#label-filtering))|

 **Example:**

@@ -304,6 +307,7 @@ and will be evicted.
 |`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
 |`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
+|`labelSelector`|(see [label filtering](#label-filtering))|

 **Example:**

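These shared parameters nest under a strategy's `params` block in the policy file. The sketch below uses `PodLifeTime` as the host strategy purely for illustration; the `include` key name under `namespaces` follows the usual descheduler convention and is an assumption here, not part of this diff:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      namespaces:
        include:
          - "namespace1"
      thresholdPriorityClassName: "priorityclass1"
      labelSelector:
        matchLabels:
          component: redis
```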
@@ -352,6 +356,7 @@ This strategy makes sure that pods having too many restarts are removed from nod
 can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters
 include `podRestartThreshold`, which is the number of restarts at which a pod should be evicted, and `includingInitContainers`,
 which determines whether init container restarts should be factored into that calculation.
+|`labelSelector`|(see [label filtering](#label-filtering))|

 **Parameters:**

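For reference, a policy entry that combines this strategy's parameters with the new `labelSelector` could look like the sketch below. The `podsHavingTooManyRestarts` parameter block and the example label are illustrative assumptions, not part of this diff:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        podRestartThreshold: 100
        includingInitContainers: true
      labelSelector:
        matchLabels:
          app: flaky-service
```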
@@ -393,6 +398,7 @@ to `Running` and `Pending`.
 |`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
 |`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
+|`labelSelector`|(see [label filtering](#label-filtering))|

 **Example:**

@@ -498,6 +504,38 @@ strategies:
 Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`. If the given priority class
 does not exist, the descheduler won't create it and will throw an error.

+### Label filtering
+
+The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta)
+to filter pods by their labels:
+
+* `PodLifeTime`
+* `RemovePodsHavingTooManyRestarts`
+* `RemovePodsViolatingNodeTaints`
+* `RemovePodsViolatingNodeAffinity`
+* `RemovePodsViolatingInterPodAntiAffinity`
+
+This allows running strategies only among the pods the descheduler is interested in.
+
+For example:
+
+```yaml
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "PodLifeTime":
+    enabled: true
+    params:
+      podLifeTime:
+        maxPodLifeTimeSeconds: 86400
+      labelSelector:
+        matchLabels:
+          component: redis
+        matchExpressions:
+          - {key: tier, operator: In, values: [cache]}
+          - {key: environment, operator: NotIn, values: [dev]}
+```
+
 ## Pod Evictions

 When the descheduler decides to evict pods from a node, it employs the following general mechanism:
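A note on the `labelSelector` example above: `matchLabels` and `matchExpressions` are combined with AND semantics, so a pod is only considered by the strategy if it carries `component: redis`, has `tier` in `cache`, and does not have `environment` in `dev`.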
@@ -76,6 +76,7 @@ type StrategyParameters struct {
     Namespaces *Namespaces
     ThresholdPriority *int32
     ThresholdPriorityClassName string
+    LabelSelector *metav1.LabelSelector
 }

 type Percentage float64
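For orientation, the new field slots in next to the existing strategy parameters. A minimal Go sketch of building a strategy that uses it, mirroring the unit test later in this diff (the concrete values and the `buildExampleStrategy` helper are illustrative only):

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	api "sigs.k8s.io/descheduler/pkg/api"
)

// buildExampleStrategy returns a PodLifeTime strategy restricted to pods
// labeled component=redis; all other optional parameters stay nil.
func buildExampleStrategy() api.DeschedulerStrategy {
	maxLifeTime := uint(86400)
	return api.DeschedulerStrategy{
		Enabled: true,
		Params: &api.StrategyParameters{
			PodLifeTime:   &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
			LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"component": "redis"}},
		},
	}
}
```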
@@ -74,6 +74,7 @@ type StrategyParameters struct {
     Namespaces *Namespaces `json:"namespaces"`
     ThresholdPriority *int32 `json:"thresholdPriority"`
     ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
+    LabelSelector *metav1.LabelSelector `json:"labelSelector"`
 }

 type Percentage float64
@@ -23,6 +23,7 @@ package v1alpha1
 import (
     unsafe "unsafe"

+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     conversion "k8s.io/apimachinery/pkg/conversion"
     runtime "k8s.io/apimachinery/pkg/runtime"
     api "sigs.k8s.io/descheduler/pkg/api"
@@ -290,6 +291,7 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
     out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
     out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
     out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
+    out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
     return nil
 }

@@ -308,6 +310,7 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
     out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
     out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
     out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
+    out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
     return nil
 }

@@ -21,6 +21,7 @@ limitations under the License.
 package v1alpha1

 import (
+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     runtime "k8s.io/apimachinery/pkg/runtime"
 )

@@ -298,6 +299,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
         *out = new(int32)
         **out = **in
     }
+    if in.LabelSelector != nil {
+        in, out := &in.LabelSelector, &out.LabelSelector
+        *out = new(v1.LabelSelector)
+        (*in).DeepCopyInto(*out)
+    }
     return
 }

@@ -21,6 +21,7 @@ limitations under the License.
 package api

 import (
+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     runtime "k8s.io/apimachinery/pkg/runtime"
 )

@@ -298,6 +299,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
         *out = new(int32)
         **out = **in
     }
+    if in.LabelSelector != nil {
+        in, out := &in.LabelSelector, &out.LabelSelector
+        *out = new(v1.LabelSelector)
+        (*in).DeepCopyInto(*out)
+    }
     return
 }

@@ -31,6 +31,7 @@ type Options struct {
     filter func(pod *v1.Pod) bool
     includedNamespaces []string
     excludedNamespaces []string
+    labelSelector *metav1.LabelSelector
 }

 // WithFilter sets a pod filter.
@@ -55,6 +56,13 @@ func WithoutNamespaces(namespaces []string) func(opts *Options) {
     }
 }

+// WithLabelSelector sets a pod label selector
+func WithLabelSelector(labelSelector *metav1.LabelSelector) func(opts *Options) {
+    return func(opts *Options) {
+        opts.labelSelector = labelSelector
+    }
+}
+
 // ListPodsOnANode lists all of the pods on a node
 // It also accepts an optional "filter" function which can be used to further limit the pods that are returned.
 // (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
@@ -74,6 +82,15 @@ func ListPodsOnANode(

     fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)

+    labelSelectorString := ""
+    if options.labelSelector != nil {
+        selector, err := metav1.LabelSelectorAsSelector(options.labelSelector)
+        if err != nil {
+            return []*v1.Pod{}, err
+        }
+        labelSelectorString = selector.String()
+    }
+
     if len(options.includedNamespaces) > 0 {
         fieldSelector, err := fields.ParseSelector(fieldSelectorString)
         if err != nil {
@@ -82,7 +99,10 @@ func ListPodsOnANode(

     for _, namespace := range options.includedNamespaces {
         podList, err := client.CoreV1().Pods(namespace).List(ctx,
-            metav1.ListOptions{FieldSelector: fieldSelector.String()})
+            metav1.ListOptions{
+                FieldSelector: fieldSelector.String(),
+                LabelSelector: labelSelectorString,
+            })
         if err != nil {
             return []*v1.Pod{}, err
         }
@@ -111,7 +131,10 @@ func ListPodsOnANode(
     // Once the descheduler switches to pod listers (through informers),
     // We need to flip to client-side filtering.
     podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
-        metav1.ListOptions{FieldSelector: fieldSelector.String()})
+        metav1.ListOptions{
+            FieldSelector: fieldSelector.String(),
+            LabelSelector: labelSelectorString,
+        })
     if err != nil {
         return []*v1.Pod{}, err
     }

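Putting the new option together with the existing ones: a minimal sketch of listing only the pods on a node that match a selector. The call shape follows the updated test below; the `podutil` import path, the helper name, and the surrounding setup are assumptions for illustration:

```go
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// listRedisPodsOnNode lists the pods on the given node that carry the label
// component=redis, combining the label selector with the field selector that
// ListPodsOnANode already applies for node name and pod phase.
func listRedisPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
	selector := &metav1.LabelSelector{MatchLabels: map[string]string{"component": "redis"}}
	return podutil.ListPodsOnANode(
		ctx,
		client,
		node,
		podutil.WithLabelSelector(selector),
	)
}
```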
@@ -24,6 +24,7 @@ import (
     "testing"

     v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
@@ -40,6 +41,7 @@ func TestListPodsOnANode(t *testing.T) {
         name string
         pods map[string][]v1.Pod
         node *v1.Node
+        labelSelector *metav1.LabelSelector
         expectedPodCount int
     }{
         {
@@ -52,6 +54,33 @@
                 "n2": {*test.BuildTestPod("pod3", 100, 0, "n2", nil)},
             },
             node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
+            labelSelector: nil,
             expectedPodCount: 2,
         },
+        {
+            name: "test listing pods with label selector",
+            pods: map[string][]v1.Pod{
+                "n1": {
+                    *test.BuildTestPod("pod1", 100, 0, "n1", nil),
+                    *test.BuildTestPod("pod2", 100, 0, "n1", func(pod *v1.Pod) {
+                        pod.Labels = map[string]string{"foo": "bar"}
+                    }),
+                    *test.BuildTestPod("pod3", 100, 0, "n1", func(pod *v1.Pod) {
+                        pod.Labels = map[string]string{"foo": "bar1"}
+                    }),
+                },
+                "n2": {*test.BuildTestPod("pod4", 100, 0, "n2", nil)},
+            },
+            node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
+            labelSelector: &metav1.LabelSelector{
+                MatchExpressions: []metav1.LabelSelectorRequirement{
+                    {
+                        Key: "foo",
+                        Operator: metav1.LabelSelectorOpIn,
+                        Values: []string{"bar", "bar1"},
+                    },
+                },
+            },
+            expectedPodCount: 2,
+        },
     }
@@ -67,7 +96,7 @@
         }
         return true, nil, fmt.Errorf("Failed to list: %v", list)
     })
-    pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node)
+    pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node, WithLabelSelector(testCase.labelSelector))
     if len(pods) != testCase.expectedPodCount {
         t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
     }

@@ -85,6 +85,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
                 }),
                 podutil.WithNamespaces(includedNamespaces),
                 podutil.WithoutNamespaces(excludedNamespaces),
+                podutil.WithLabelSelector(strategy.Params.LabelSelector),
             )
             if err != nil {
                 klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))

@@ -26,6 +26,7 @@ import (
     "sigs.k8s.io/descheduler/pkg/utils"

     v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/klog/v2"
 )
@@ -54,9 +55,13 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
     }

     var includedNamespaces, excludedNamespaces []string
-    if strategy.Params != nil && strategy.Params.Namespaces != nil {
-        includedNamespaces = strategy.Params.Namespaces.Include
-        excludedNamespaces = strategy.Params.Namespaces.Exclude
+    var labelSelector *metav1.LabelSelector
+    if strategy.Params != nil {
+        if strategy.Params.Namespaces != nil {
+            includedNamespaces = strategy.Params.Namespaces.Include
+            excludedNamespaces = strategy.Params.Namespaces.Exclude
+        }
+        labelSelector = strategy.Params.LabelSelector
     }

     thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
@@ -76,6 +81,7 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
             podutil.WithFilter(evictable.IsEvictable),
             podutil.WithNamespaces(includedNamespaces),
             podutil.WithoutNamespaces(excludedNamespaces),
+            podutil.WithLabelSelector(labelSelector),
         )
         if err != nil {
             //no pods evicted as error encountered retrieving evictable Pods

@@ -55,9 +55,13 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
     }

     var includedNamespaces, excludedNamespaces []string
-    if strategy.Params != nil && strategy.Params.Namespaces != nil {
-        includedNamespaces = strategy.Params.Namespaces.Include
-        excludedNamespaces = strategy.Params.Namespaces.Exclude
+    var labelSelector *metav1.LabelSelector
+    if strategy.Params != nil {
+        if strategy.Params.Namespaces != nil {
+            includedNamespaces = strategy.Params.Namespaces.Include
+            excludedNamespaces = strategy.Params.Namespaces.Exclude
+        }
+        labelSelector = strategy.Params.LabelSelector
     }

     thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
@@ -76,6 +80,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
             node,
             podutil.WithNamespaces(includedNamespaces),
             podutil.WithoutNamespaces(excludedNamespaces),
+            podutil.WithLabelSelector(labelSelector),
         )
         if err != nil {
             return

@@ -21,7 +21,7 @@ import (
     "fmt"

     v1 "k8s.io/api/core/v1"
-    v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/klog/v2"

@@ -91,7 +91,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
     for _, node := range nodes {
         klog.V(1).InfoS("Processing node", "node", klog.KObj(node))

-        pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter)
+        pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, strategy.Params.LabelSelector, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter)
         for _, pod := range pods {
             success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
             if success {
@@ -107,7 +107,15 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
     }
 }

-func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, includedNamespaces, excludedNamespaces []string, maxPodLifeTimeSeconds uint, filter func(pod *v1.Pod) bool) []*v1.Pod {
+func listOldPodsOnNode(
+    ctx context.Context,
+    client clientset.Interface,
+    node *v1.Node,
+    includedNamespaces, excludedNamespaces []string,
+    labelSelector *metav1.LabelSelector,
+    maxPodLifeTimeSeconds uint,
+    filter func(pod *v1.Pod) bool,
+) []*v1.Pod {
     pods, err := podutil.ListPodsOnANode(
         ctx,
         client,
@@ -115,6 +123,7 @@ func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1
         podutil.WithFilter(filter),
         podutil.WithNamespaces(includedNamespaces),
         podutil.WithoutNamespaces(excludedNamespaces),
+        podutil.WithLabelSelector(labelSelector),
     )
     if err != nil {
         return nil
@@ -122,7 +131,7 @@ func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1

     var oldPods []*v1.Pod
     for _, pod := range pods {
-        podAgeSeconds := uint(v1meta.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
+        podAgeSeconds := uint(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
         if podAgeSeconds > maxPodLifeTimeSeconds {
             oldPods = append(oldPods, pod)
         }

@@ -111,6 +111,19 @@ func TestPodLifeTime(t *testing.T) {
         pod.ObjectMeta.OwnerReferences = ownerRef1
     })

+    // Setup two old pods with different labels
+    p12 := test.BuildTestPod("p12", 100, 0, node.Name, nil)
+    p12.Namespace = "dev"
+    p12.ObjectMeta.CreationTimestamp = olderPodCreationTime
+    p13 := test.BuildTestPod("p13", 100, 0, node.Name, nil)
+    p13.Namespace = "dev"
+    p13.ObjectMeta.CreationTimestamp = olderPodCreationTime
+
+    p12.ObjectMeta.Labels = map[string]string{"foo": "bar"}
+    p13.ObjectMeta.Labels = map[string]string{"foo": "bar1"}
+    p12.ObjectMeta.OwnerReferences = ownerRef1
+    p13.ObjectMeta.OwnerReferences = ownerRef1
+
     var maxLifeTime uint = 600
     testCases := []struct {
         description string
@@ -208,6 +221,21 @@ func TestPodLifeTime(t *testing.T) {
             pods: []v1.Pod{*p11},
             expectedEvictedPodCount: 1,
         },
+        {
+            description: "Two old pods with different labels, 1 selected by labelSelector",
+            strategy: api.DeschedulerStrategy{
+                Enabled: true,
+                Params: &api.StrategyParameters{
+                    PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
+                    LabelSelector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"foo": "bar"},
+                    },
+                },
+            },
+            maxPodsToEvictPerNode: 5,
+            pods: []v1.Pod{*p12, *p13},
+            expectedEvictedPodCount: 1,
+        },
     }

     for _, tc := range testCases {

@@ -78,6 +78,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
             podutil.WithFilter(evictable.IsEvictable),
             podutil.WithNamespaces(includedNamespaces),
             podutil.WithoutNamespaces(excludedNamespaces),
+            podutil.WithLabelSelector(strategy.Params.LabelSelector),
         )
         if err != nil {
             klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))

@@ -187,7 +187,15 @@ func TestLowNodeUtilization(t *testing.T) {
     deleteRC(ctx, t, clientSet, rc)
 }

-func runPodLifetimeStrategy(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer, namespaces *deschedulerapi.Namespaces, priorityClass string, priority *int32) {
+func runPodLifetimeStrategy(
+    ctx context.Context,
+    clientset clientset.Interface,
+    nodeInformer coreinformers.NodeInformer,
+    namespaces *deschedulerapi.Namespaces,
+    priorityClass string,
+    priority *int32,
+    labelSelector *metav1.LabelSelector,
+) {
     // Run descheduler.
     evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
     if err != nil || len(evictionPolicyGroupVersion) == 0 {
@@ -210,6 +218,7 @@ func runPodLifetimeStrategy(ctx context.Context, clientset clientset.Interface,
                 Namespaces: namespaces,
                 ThresholdPriority: priority,
                 ThresholdPriorityClassName: priorityClass,
+                LabelSelector: labelSelector,
             },
         },
         nodes,
@@ -288,7 +297,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) {
     t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace)
     runPodLifetimeStrategy(ctx, clientSet, nodeInformer, &deschedulerapi.Namespaces{
         Include: []string{rc.Namespace},
-    }, "", nil)
+    }, "", nil, nil)

     // All pods are supposed to be deleted, wait until all the old pods are deleted
     if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
@@ -359,7 +368,7 @@ func TestNamespaceConstraintsExclude(t *testing.T) {
     t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace)
     runPodLifetimeStrategy(ctx, clientSet, nodeInformer, &deschedulerapi.Namespaces{
         Exclude: []string{rc.Namespace},
-    }, "", nil)
+    }, "", nil, nil)

     t.Logf("Waiting 10s")
     time.Sleep(10 * time.Second)
@@ -461,10 +470,10 @@ func testPriority(t *testing.T, isPriorityClass bool) {

     if isPriorityClass {
         t.Logf("set the strategy to delete pods with priority lower than priority class %s", highPriorityClass.Name)
-        runPodLifetimeStrategy(ctx, clientSet, nodeInformer, nil, highPriorityClass.Name, nil)
+        runPodLifetimeStrategy(ctx, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, nil)
     } else {
         t.Logf("set the strategy to delete pods with priority lower than %d", highPriority)
-        runPodLifetimeStrategy(ctx, clientSet, nodeInformer, nil, "", &highPriority)
+        runPodLifetimeStrategy(ctx, clientSet, nodeInformer, nil, "", &highPriority, nil)
     }

     t.Logf("Waiting 10s")
@@ -517,6 +526,109 @@ func testPriority(t *testing.T, isPriorityClass bool) {
     }
 }

+func TestPodLabelSelector(t *testing.T) {
+    ctx := context.Background()
+
+    clientSet, nodeInformer, stopCh := initializeClient(t)
+    defer close(stopCh)
+
+    testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
+    if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
+        t.Fatalf("Unable to create ns %v", testNamespace.Name)
+    }
+    defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
+
+    // create two replicationControllers with different labels
+    rcEvict := RcByNameContainer("test-rc-podlifetime-evict", testNamespace.Name, 5, map[string]string{"test": "podlifetime-evict"}, nil, "")
+    if _, err := clientSet.CoreV1().ReplicationControllers(rcEvict.Namespace).Create(ctx, rcEvict, metav1.CreateOptions{}); err != nil {
+        t.Errorf("Error creating rc %v", err)
+    }
+    defer deleteRC(ctx, t, clientSet, rcEvict)
+
+    rcReserve := RcByNameContainer("test-rc-podlifetime-reserve", testNamespace.Name, 5, map[string]string{"test": "podlifetime-reserve"}, nil, "")
+    if _, err := clientSet.CoreV1().ReplicationControllers(rcReserve.Namespace).Create(ctx, rcReserve, metav1.CreateOptions{}); err != nil {
+        t.Errorf("Error creating rc %v", err)
+    }
+    defer deleteRC(ctx, t, clientSet, rcReserve)
+
+    // wait for a while so all the pods are at least few seconds older
+    time.Sleep(5 * time.Second)
+
+    // it's assumed all new pods are named differently from currently running -> no name collision
+    podListEvict, err := clientSet.CoreV1().Pods(rcEvict.Namespace).List(
+        ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcEvict.Spec.Template.Labels).String()})
+    if err != nil {
+        t.Fatalf("Unable to list pods: %v", err)
+    }
+    podListReserve, err := clientSet.CoreV1().Pods(rcReserve.Namespace).List(
+        ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcReserve.Spec.Template.Labels).String()})
+    if err != nil {
+        t.Fatalf("Unable to list pods: %v", err)
+    }
+
+    if len(podListEvict.Items)+len(podListReserve.Items) != 10 {
+        t.Fatalf("Expected 10 replicas, got %v instead", len(podListEvict.Items)+len(podListReserve.Items))
+    }
+
+    expectReservePodNames := getPodNames(podListReserve.Items)
+    expectEvictPodNames := getPodNames(podListEvict.Items)
+    sort.Strings(expectReservePodNames)
+    sort.Strings(expectEvictPodNames)
+    t.Logf("Pods not expect to be evicted: %v, pods expect to be evicted: %v", expectReservePodNames, expectEvictPodNames)
+
+    t.Logf("set the strategy to delete pods with label test:podlifetime-evict")
+    runPodLifetimeStrategy(ctx, clientSet, nodeInformer, nil, "", nil, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}})
+
+    t.Logf("Waiting 10s")
+    time.Sleep(10 * time.Second)
+    // check if all pods without target label are not evicted
+    podListReserve, err = clientSet.CoreV1().Pods(rcReserve.Namespace).List(
+        ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcReserve.Spec.Template.Labels).String()})
+    if err != nil {
+        t.Fatalf("Unable to list pods after running strategy: %v", err)
+    }
+
+    reservedPodNames := getPodNames(podListReserve.Items)
+    sort.Strings(reservedPodNames)
+    t.Logf("Existing reserved pods: %v", reservedPodNames)
+
+    // validate no pods were deleted
+    if len(intersectStrings(expectReservePodNames, reservedPodNames)) != 5 {
+        t.Fatalf("None of %v unevictable pods are expected to be deleted", expectReservePodNames)
+    }
+
+    //check if all selected pods are evicted
+    if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
+        podListEvict, err := clientSet.CoreV1().Pods(rcEvict.Namespace).List(
+            ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcEvict.Spec.Template.Labels).String()})
+        if err != nil {
+            return false, nil
+        }
+
+        newPodNames := getPodNames(podListEvict.Items)
+        // validate all pod were deleted
+        if len(intersectStrings(expectEvictPodNames, newPodNames)) > 0 {
+            t.Logf("Waiting until %v selected pods get deleted", intersectStrings(expectEvictPodNames, newPodNames))
+            // check if there's at least one pod not in Terminating state
+            for _, pod := range podListEvict.Items {
+                // In case podList contains newly created pods
+                if len(intersectStrings(expectEvictPodNames, []string{pod.Name})) == 0 {
+                    continue
+                }
+                if pod.DeletionTimestamp == nil {
+                    t.Logf("Pod %v not in terminating state", pod.Name)
+                    return false, nil
+                }
+            }
+            t.Logf("All %v pods are terminating", intersectStrings(expectEvictPodNames, newPodNames))
+        }
+
+        return true, nil
+    }); err != nil {
+        t.Fatalf("Error waiting for pods to be deleted: %v", err)
+    }
+}
+
 func TestEvictAnnotation(t *testing.T) {
     ctx := context.Background()
