mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Add maxNoOfPodsToEvictPerNamespace policy

Kubernetes Prow Robot
2021-11-29 08:43:23 -08:00
committed by Jane Liu
23 changed files with 268 additions and 221 deletions

View File

@@ -85,12 +85,6 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler")
// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "DEPRECATED: enables evicting pods using local storage by descheduler")
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
rs.SecureServing.AddFlags(fs)

View File

@@ -42,7 +42,10 @@ type DeschedulerPolicy struct {
IgnorePVCPods *bool
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
- MaxNoOfPodsToEvictPerNode *int
+ MaxNoOfPodsToEvictPerNode *uint
+ // MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
+ MaxNoOfPodsToEvictPerNamespace *uint
}
type StrategyName string

View File

@@ -43,6 +43,9 @@ type DeschedulerPolicy struct {
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *int `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}
type StrategyName string
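
Both limits are declared as pointers so that an unset field can be distinguished from an explicit 0. A minimal sketch of what a consumer of these policy fields might do (the newUint helper and local policy type are hypothetical, not part of the commit):

package main

import "fmt"

// newUint is a hypothetical helper; Go has no address-of for numeric literals.
func newUint(v uint) *uint { return &v }

// policy mirrors the two limit fields from the internal DeschedulerPolicy type.
type policy struct {
	MaxNoOfPodsToEvictPerNode      *uint
	MaxNoOfPodsToEvictPerNamespace *uint
}

func main() {
	// Cap evictions at 5 per namespace; the nil per-node field means "no limit".
	p := policy{MaxNoOfPodsToEvictPerNamespace: newUint(5)}
	fmt.Println(p.MaxNoOfPodsToEvictPerNode == nil) // true
}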

View File

@@ -136,7 +136,20 @@ func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
- out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
+ if in.MaxNoOfPodsToEvictPerNode != nil {
+ in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
+ *out = new(uint)
+ **out = uint(**in)
+ } else {
+ out.MaxNoOfPodsToEvictPerNode = nil
+ }
+ if in.MaxNoOfPodsToEvictPerNamespace != nil {
+ in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
+ *out = new(uint)
+ **out = uint(**in)
+ } else {
+ out.MaxNoOfPodsToEvictPerNamespace = nil
+ }
return nil
}
@@ -151,7 +164,20 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.Des
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
- out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
+ if in.MaxNoOfPodsToEvictPerNode != nil {
+ in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
+ *out = new(int)
+ **out = int(**in)
+ } else {
+ out.MaxNoOfPodsToEvictPerNode = nil
+ }
+ if in.MaxNoOfPodsToEvictPerNamespace != nil {
+ in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
+ *out = new(int)
+ **out = int(**in)
+ } else {
+ out.MaxNoOfPodsToEvictPerNamespace = nil
+ }
return nil
}
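
The converters replace the old unsafe.Pointer cast with an allocate-and-convert pattern because the internal field is now *uint while v1alpha1 keeps *int, and those are distinct types. One side effect worth knowing (a standalone sketch, not from the commit): a negative value in the versioned *int field wraps around when converted to uint rather than being rejected.

package main

import "fmt"

func main() {
	in := -1
	fmt.Println(uint(in)) // 18446744073709551615 on 64-bit platforms: negative limits wrap
}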

View File

@@ -62,6 +62,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(int)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = **in
}
return
}

View File

@@ -59,7 +59,12 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
- *out = new(int)
+ *out = new(uint)
**out = **in
}
+ if in.MaxNoOfPodsToEvictPerNamespace != nil {
+ in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
+ *out = new(uint)
+ **out = **in
+ }
return
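
The generated deep-copy allocates fresh storage for each non-nil pointer field instead of copying the pointer, which would alias the source. A minimal illustration of the difference (plain Go, not the generated code):

package main

import "fmt"

func main() {
	limit := uint(3)
	src := &limit

	alias := src // shallow copy: both pointers share one uint

	deep := new(uint) // deep copy: allocate, then copy the value
	*deep = *src

	*src = 7
	fmt.Println(*alias, *deep) // 7 3
}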

View File

@@ -17,9 +17,10 @@ limitations under the License.
package v1alpha1
import (
- componentbaseconfig "k8s.io/component-base/config"
"time"
+ componentbaseconfig "k8s.io/component-base/config"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

View File

@@ -19,14 +19,13 @@ package descheduler
import (
"context"
"fmt"
- "sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
- "k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/client-go/informers"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/metrics"
"sigs.k8s.io/descheduler/pkg/api"
@@ -35,6 +34,7 @@ import (
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
+ "sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
)
func Run(rs *options.DeschedulerServer) error {
@@ -86,12 +86,12 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
"RemoveFailedPods": strategies.RemoveFailedPods,
}
- nodeSelector := rs.NodeSelector
+ var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {
nodeSelector = *deschedulerPolicy.NodeSelector
}
- evictLocalStoragePods := rs.EvictLocalStoragePods
+ var evictLocalStoragePods bool
if deschedulerPolicy.EvictLocalStoragePods != nil {
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
}
@@ -109,11 +109,6 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
}
- maxNoOfPodsToEvictPerNode := rs.MaxNoOfPodsToEvictPerNode
- if deschedulerPolicy.MaxNoOfPodsToEvictPerNode != nil {
- maxNoOfPodsToEvictPerNode = *deschedulerPolicy.MaxNoOfPodsToEvictPerNode
- }
wait.Until(func() {
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
if err != nil {
@@ -132,7 +127,8 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
rs.Client,
evictionPolicyGroupVersion,
rs.DryRun,
- maxNoOfPodsToEvictPerNode,
+ deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
+ deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
evictLocalStoragePods,
evictSystemCriticalPods,
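
The flag-merging step is gone: the two policy pointers are handed straight to the evictor, so nil now means "no limit" rather than 0. A sketch of that nil-pointer semantics (limitReached is a hypothetical helper for illustration; the real check lives in EvictPod below):

package main

import "fmt"

// limitReached reports whether one more eviction would exceed limit; nil means unlimited.
func limitReached(limit *uint, evictedSoFar uint) bool {
	return limit != nil && evictedSoFar+1 > *limit
}

func main() {
	var one uint = 1
	fmt.Println(limitReached(nil, 100)) // false: no cap configured
	fmt.Println(limitReached(&one, 1))  // true: a second eviction would exceed the cap of 1
}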

View File

@@ -45,57 +45,64 @@ const (
)
// nodePodEvictedCount keeps count of pods evicted on node
- type nodePodEvictedCount map[*v1.Node]int
+ type nodePodEvictedCount map[*v1.Node]uint
+ type namespacePodEvictCount map[string]uint
type PodEvictor struct {
- client clientset.Interface
- nodes []*v1.Node
- policyGroupVersion string
- dryRun bool
- maxPodsToEvictPerNode int
- nodepodCount nodePodEvictedCount
- evictLocalStoragePods bool
- evictSystemCriticalPods bool
- ignorePvcPods bool
+ client clientset.Interface
+ nodes []*v1.Node
+ policyGroupVersion string
+ dryRun bool
+ maxPodsToEvictPerNode *uint
+ maxPodsToEvictPerNamespace *uint
+ nodepodCount nodePodEvictedCount
+ namespacePodCount namespacePodEvictCount
+ evictLocalStoragePods bool
+ evictSystemCriticalPods bool
+ ignorePvcPods bool
}
func NewPodEvictor(
client clientset.Interface,
policyGroupVersion string,
dryRun bool,
- maxPodsToEvictPerNode int,
+ maxPodsToEvictPerNode *uint,
+ maxPodsToEvictPerNamespace *uint,
nodes []*v1.Node,
evictLocalStoragePods bool,
evictSystemCriticalPods bool,
ignorePvcPods bool,
) *PodEvictor {
var nodePodCount = make(nodePodEvictedCount)
+ var namespacePodCount = make(namespacePodEvictCount)
for _, node := range nodes {
// Initialize podsEvicted till now with 0.
nodePodCount[node] = 0
}
return &PodEvictor{
- client: client,
- nodes: nodes,
- policyGroupVersion: policyGroupVersion,
- dryRun: dryRun,
- maxPodsToEvictPerNode: maxPodsToEvictPerNode,
- nodepodCount: nodePodCount,
- evictLocalStoragePods: evictLocalStoragePods,
- evictSystemCriticalPods: evictSystemCriticalPods,
- ignorePvcPods: ignorePvcPods,
+ client: client,
+ nodes: nodes,
+ policyGroupVersion: policyGroupVersion,
+ dryRun: dryRun,
+ maxPodsToEvictPerNode: maxPodsToEvictPerNode,
+ maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
+ nodepodCount: nodePodCount,
+ namespacePodCount: namespacePodCount,
+ evictLocalStoragePods: evictLocalStoragePods,
+ evictSystemCriticalPods: evictSystemCriticalPods,
+ ignorePvcPods: ignorePvcPods,
}
}
// NodeEvicted gives a number of pods evicted for node
- func (pe *PodEvictor) NodeEvicted(node *v1.Node) int {
+ func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
return pe.nodepodCount[node]
}
// TotalEvicted gives a number of pods evicted through all nodes
- func (pe *PodEvictor) TotalEvicted() int {
- var total int
+ func (pe *PodEvictor) TotalEvicted() uint {
+ var total uint
for _, count := range pe.nodepodCount {
total += count
}
@@ -110,9 +117,14 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
if len(reasons) > 0 {
reason += " (" + strings.Join(reasons, ", ") + ")"
}
- if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode {
- metrics.PodsEvicted.With(map[string]string{"result": "maximum number reached", "strategy": strategy, "namespace": pod.Namespace}).Inc()
- return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name)
+ if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[node]+1 > *pe.maxPodsToEvictPerNode {
+ metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace}).Inc()
+ return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", *pe.maxPodsToEvictPerNode, node.Name)
}
+ if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
+ metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace}).Inc()
+ return false, fmt.Errorf("Maximum number %v of evicted pods per %q namespace reached", *pe.maxPodsToEvictPerNamespace, pod.Namespace)
+ }
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
@@ -124,6 +136,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
}
pe.nodepodCount[node]++
+ pe.namespacePodCount[pod.Namespace]++
if pe.dryRun {
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason)
} else {
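
Every call site now passes both limits as *uint, nil meaning unlimited. A hedged sketch of the updated constructor call, assuming the fake clientset from client-go (node setup elided):

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/fake"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

func main() {
	maxPerNode := uint(10) // cap per node; the namespace limit stays nil (unlimited)
	podEvictor := evictions.NewPodEvictor(
		fake.NewSimpleClientset(),
		"policy/v1", // eviction policy group/version
		false,       // dryRun
		&maxPerNode,
		nil, // maxPodsToEvictPerNamespace: no cap
		[]*v1.Node{},
		false, // evictLocalStoragePods
		false, // evictSystemCriticalPods
		false, // ignorePvcPods
	)
	_ = podEvictor
}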

View File

@@ -173,15 +173,13 @@ func TestFindDuplicatePods(t *testing.T) {
testCases := []struct {
description string
- maxPodsToEvictPerNode int
pods []v1.Pod
nodes []*v1.Node
- expectedEvictedPodCount int
+ expectedEvictedPodCount uint
strategy api.DeschedulerStrategy
}{
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 1,
@@ -189,7 +187,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
@@ -197,7 +194,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p8, *p9, *p10},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 1,
@@ -205,7 +201,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 2,
@@ -213,7 +208,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
- maxPodsToEvictPerNode: 2,
pods: []v1.Pod{*p4, *p5, *p6, *p7},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
@@ -221,7 +215,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Test all Pods: 4 should be evicted.",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 2,
@@ -229,7 +222,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Pods with the same owner but different images should not be evicted",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11, *p12},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
@@ -237,7 +229,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Pods with multiple containers should not match themselves",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p13},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
@@ -245,7 +236,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11, *p13},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
@@ -253,7 +243,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node3},
expectedEvictedPodCount: 0,
@@ -261,7 +250,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p15, *p16, *p17},
nodes: []*v1.Node{node1, node4},
expectedEvictedPodCount: 0,
@@ -269,7 +257,6 @@ func TestFindDuplicatePods(t *testing.T) {
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node5},
expectedEvictedPodCount: 0,
@@ -287,7 +274,8 @@ func TestFindDuplicatePods(t *testing.T) {
fakeClient,
"v1",
false,
- testCase.maxPodsToEvictPerNode,
+ nil,
+ nil,
testCase.nodes,
false,
false,
@@ -432,10 +420,9 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
testCases := []struct {
description string
- maxPodsToEvictPerNode int
pods []v1.Pod
nodes []*v1.Node
- expectedEvictedPodCount int
+ expectedEvictedPodCount uint
strategy api.DeschedulerStrategy
}{
{
@@ -695,7 +682,8 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
- testCase.maxPodsToEvictPerNode,
+ nil,
+ nil,
testCase.nodes,
false,
false,

View File

@@ -2,9 +2,10 @@ package strategies
import (
"context"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -41,7 +42,7 @@ func TestRemoveFailedPods(t *testing.T) {
description string
nodes []*v1.Node
strategy api.DeschedulerStrategy
- expectedEvictedPodCount int
+ expectedEvictedPodCount uint
pods []v1.Pod
}{
{
@@ -220,7 +221,8 @@ func TestRemoveFailedPods(t *testing.T) {
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
- 100,
+ nil,
+ nil,
tc.nodes,
false,
false,

View File

@@ -102,13 +102,15 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
}
}
var uint1 uint = 1
tests := []struct {
- description string
- nodes []*v1.Node
- pods []v1.Pod
- strategy api.DeschedulerStrategy
- expectedEvictedPodCount int
- maxPodsToEvictPerNode int
+ description string
+ nodes []*v1.Node
+ pods []v1.Pod
+ strategy api.DeschedulerStrategy
+ expectedEvictedPodCount uint
+ maxPodsToEvictPerNode *uint
+ maxNoOfPodsToEvictPerNamespace *uint
}{
{
description: "Invalid strategy type, should not evict any pods",
@@ -123,7 +125,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
- maxPodsToEvictPerNode: 0,
},
{
description: "Pod is correctly scheduled on node, no eviction expected",
@@ -131,7 +132,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil),
nodes: []*v1.Node{nodeWithLabels},
- maxPodsToEvictPerNode: 0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
@@ -139,7 +139,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
- maxPodsToEvictPerNode: 0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
@@ -147,7 +146,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
- maxPodsToEvictPerNode: 1,
+ maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminating",
@@ -155,7 +154,23 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
- maxPodsToEvictPerNode: 1,
+ maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should not be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminating",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
@@ -163,7 +178,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
- maxPodsToEvictPerNode: 0,
},
{
description: "Pod is scheduled on node without matching labels, and node where pod fits is available, should evict",
@@ -171,7 +185,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
- maxPodsToEvictPerNode: 1,
+ maxPodsToEvictPerNode: &uint1,
},
}
@@ -187,6 +201,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
false,
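
The `var uint1 uint = 1` declarations in these tests exist because Go cannot take the address of an untyped constant. A generic helper (hypothetical; this commit predates Go generics and spells the pointers out instead) would condense the pattern:

package main

import "fmt"

// ptrTo returns a pointer to v; requires Go 1.18+ generics.
func ptrTo[T any](v T) *T { return &v }

func main() {
	limit := ptrTo(uint(1))
	fmt.Println(*limit) // 1
}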

View File

@@ -116,15 +116,18 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
"datacenter": "west",
}
var uint1 uint = 1
tests := []struct {
- description string
- nodes []*v1.Node
- pods []v1.Pod
- evictLocalStoragePods bool
- evictSystemCriticalPods bool
- maxPodsToEvictPerNode int
- expectedEvictedPodCount int
- nodeFit bool
+ description string
+ nodes []*v1.Node
+ pods []v1.Pod
+ evictLocalStoragePods bool
+ evictSystemCriticalPods bool
+ maxPodsToEvictPerNode *uint
+ maxNoOfPodsToEvictPerNamespace *uint
+ expectedEvictedPodCount uint
+ nodeFit bool
}{
{
@@ -133,7 +136,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
- maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p2 gets evicted
},
{
@@ -142,7 +144,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
- maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p4 gets evicted
},
{
@@ -151,16 +152,24 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
- maxPodsToEvictPerNode: 1,
+ maxPodsToEvictPerNode: &uint1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p5, *p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxNoOfPodsToEvictPerNamespace: &uint1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{
description: "Critical pods not tolerating node taint should not be evicted",
pods: []v1.Pod{*p7, *p8, *p9, *p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
- maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //nothing is evicted
},
{
@@ -169,7 +178,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
nodes: []*v1.Node{node2},
evictLocalStoragePods: true,
evictSystemCriticalPods: false,
- maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p9 gets evicted
},
{
@@ -178,7 +186,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
- maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p11 gets evicted
},
{
@@ -187,7 +194,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: true,
- maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 2, //p2 and p7 are evicted
},
{
@@ -196,7 +202,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
nodes: []*v1.Node{node1, node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
- maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //p2 gets evicted
nodeFit: true,
},
@@ -206,7 +211,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
nodes: []*v1.Node{node1, node3},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
- maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //p2 gets evicted
nodeFit: true,
},
@@ -216,7 +220,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
nodes: []*v1.Node{node1, node4},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
- maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //p2 gets evicted
nodeFit: true,
},
@@ -235,6 +238,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
tc.evictLocalStoragePods,
tc.evictSystemCriticalPods,

View File

@@ -44,13 +44,12 @@ func TestHighNodeUtilization(t *testing.T) {
nodeSelectorValue := "west"
testCases := []struct {
- name string
- thresholds api.ResourceThresholds
- nodes map[string]*v1.Node
- pods map[string]*v1.PodList
- maxPodsToEvictPerNode int
- expectedPodsEvicted int
- evictedPods []string
+ name string
+ thresholds api.ResourceThresholds
+ nodes map[string]*v1.Node
+ pods map[string]*v1.PodList
+ expectedPodsEvicted uint
+ evictedPods []string
}{
{
name: "no node below threshold usage",
@@ -89,8 +88,7 @@ func TestHighNodeUtilization(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 0,
+ expectedPodsEvicted: 0,
},
{
name: "no evictable pods",
@@ -147,8 +145,7 @@ func TestHighNodeUtilization(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 0,
+ expectedPodsEvicted: 0,
},
{
name: "no node to schedule evicted pods",
@@ -182,8 +179,7 @@ func TestHighNodeUtilization(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 0,
+ expectedPodsEvicted: 0,
},
{
name: "without priorities",
@@ -224,9 +220,8 @@ func TestHighNodeUtilization(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 2,
- evictedPods: []string{"p1", "p7"},
+ expectedPodsEvicted: 2,
+ evictedPods: []string{"p1", "p7"},
},
{
name: "without priorities stop when resource capacity is depleted",
@@ -260,8 +255,7 @@ func TestHighNodeUtilization(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 1,
+ expectedPodsEvicted: 1,
},
{
name: "with priorities",
@@ -302,9 +296,8 @@ func TestHighNodeUtilization(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 1,
- evictedPods: []string{"p1"},
+ expectedPodsEvicted: 1,
+ evictedPods: []string{"p1"},
},
{
name: "without priorities evicting best-effort pods only",
@@ -342,9 +335,8 @@ func TestHighNodeUtilization(t *testing.T) {
Items: []v1.Pod{},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 1,
- evictedPods: []string{"p1"},
+ expectedPodsEvicted: 1,
+ evictedPods: []string{"p1"},
},
{
name: "with extended resource",
@@ -403,9 +395,8 @@ func TestHighNodeUtilization(t *testing.T) {
Items: []v1.Pod{},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 2,
- evictedPods: []string{"p1", "p2"},
+ expectedPodsEvicted: 2,
+ evictedPods: []string{"p1", "p2"},
},
{
name: "with extended resource in some of nodes",
@@ -446,8 +437,7 @@ func TestHighNodeUtilization(t *testing.T) {
Items: []v1.Pod{},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 0,
+ expectedPodsEvicted: 0,
},
{
name: "Other node match pod node selector",
@@ -484,8 +474,7 @@ func TestHighNodeUtilization(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 1,
+ expectedPodsEvicted: 1,
},
{
name: "Other node does not match pod node selector",
@@ -518,8 +507,7 @@ func TestHighNodeUtilization(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 0,
+ expectedPodsEvicted: 0,
},
}
@@ -577,7 +565,8 @@ func TestHighNodeUtilization(t *testing.T) {
fakeClient,
"v1",
false,
- test.maxPodsToEvictPerNode,
+ nil,
+ nil,
nodes,
false,
false,
@@ -597,7 +586,7 @@ func TestHighNodeUtilization(t *testing.T) {
podsEvicted := podEvictor.TotalEvicted()
if test.expectedPodsEvicted != podsEvicted {
- t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
+ t.Errorf("Expected %v pods to be evicted but %v got evicted", test.expectedPodsEvicted, podsEvicted)
}
if evictionFailed {
t.Errorf("Pod evictions failed unexpectedly")
@@ -710,7 +699,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
name string
nodes []*v1.Node
pods []*v1.Pod
- evictionsExpected int
+ evictionsExpected uint
}{
{
name: "No taints",
@@ -767,7 +756,8 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
fakeClient,
"policy/v1",
false,
- item.evictionsExpected,
+ &item.evictionsExpected,
+ nil,
item.nodes,
false,
false,

View File

@@ -50,8 +50,7 @@ func TestLowNodeUtilization(t *testing.T) {
thresholds, targetThresholds api.ResourceThresholds
nodes map[string]*v1.Node
pods map[string]*v1.PodList
- maxPodsToEvictPerNode int
- expectedPodsEvicted int
+ expectedPodsEvicted uint
evictedPods []string
}{
{
@@ -108,8 +107,7 @@ func TestLowNodeUtilization(t *testing.T) {
},
n3NodeName: {},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 0,
+ expectedPodsEvicted: 0,
},
{
name: "without priorities",
@@ -167,8 +165,7 @@ func TestLowNodeUtilization(t *testing.T) {
},
n3NodeName: {},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 4,
+ expectedPodsEvicted: 4,
},
{
name: "without priorities stop when cpu capacity is depleted",
@@ -226,7 +223,6 @@ func TestLowNodeUtilization(t *testing.T) {
},
n3NodeName: {},
},
- maxPodsToEvictPerNode: 0,
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before cpu is depleted
expectedPodsEvicted: 3,
},
@@ -305,8 +301,7 @@ func TestLowNodeUtilization(t *testing.T) {
},
n3NodeName: {},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 4,
+ expectedPodsEvicted: 4,
},
{
name: "without priorities evicting best-effort pods only",
@@ -381,9 +376,8 @@ func TestLowNodeUtilization(t *testing.T) {
},
n3NodeName: {},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 4,
- evictedPods: []string{"p1", "p2", "p4", "p5"},
+ expectedPodsEvicted: 4,
+ evictedPods: []string{"p1", "p2", "p4", "p5"},
},
{
name: "with extended resource",
@@ -469,7 +463,6 @@ func TestLowNodeUtilization(t *testing.T) {
},
n3NodeName: {},
},
- maxPodsToEvictPerNode: 0,
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before extended resource is depleted
expectedPodsEvicted: 3,
},
@@ -507,7 +500,6 @@ func TestLowNodeUtilization(t *testing.T) {
},
n3NodeName: {},
},
- maxPodsToEvictPerNode: 0,
// 0 pods available for eviction because there's no enough extended resource in node2
expectedPodsEvicted: 0,
},
@@ -561,8 +553,7 @@ func TestLowNodeUtilization(t *testing.T) {
},
n2NodeName: {},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 0,
+ expectedPodsEvicted: 0,
},
{
name: "without priorities, but only other node doesn't match pod node selector for p4 and p5",
@@ -630,8 +621,7 @@ func TestLowNodeUtilization(t *testing.T) {
},
n2NodeName: {},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 3,
+ expectedPodsEvicted: 3,
},
{
name: "without priorities, but only other node doesn't match pod node affinity for p4 and p5",
@@ -729,8 +719,7 @@ func TestLowNodeUtilization(t *testing.T) {
*test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
}},
},
- maxPodsToEvictPerNode: 0,
- expectedPodsEvicted: 3,
+ expectedPodsEvicted: 3,
},
}
@@ -788,7 +777,8 @@ func TestLowNodeUtilization(t *testing.T) {
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
- test.maxPodsToEvictPerNode,
+ nil,
+ nil,
nodes,
false,
false,
@@ -809,7 +799,7 @@ func TestLowNodeUtilization(t *testing.T) {
podsEvicted := podEvictor.TotalEvicted()
if test.expectedPodsEvicted != podsEvicted {
- t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
+ t.Errorf("Expected %v pods to be evicted but %v got evicted", test.expectedPodsEvicted, podsEvicted)
}
if evictionFailed {
t.Errorf("Pod evictions failed unexpectedly")
@@ -1013,7 +1003,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
name string
nodes []*v1.Node
pods []*v1.Pod
- evictionsExpected int
+ evictionsExpected uint
}{
{
name: "No taints",
@@ -1088,7 +1078,8 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
- item.evictionsExpected,
+ &item.evictionsExpected,
+ nil,
item.nodes,
false,
false,

View File

@@ -99,52 +99,61 @@ func TestPodAntiAffinity(t *testing.T) {
"datacenter": "west",
}
var uint1 uint = 1
var uint3 uint = 3
tests := []struct {
- description string
- maxPodsToEvictPerNode int
- pods []v1.Pod
- expectedEvictedPodCount int
- nodeFit bool
- nodes []*v1.Node
+ description string
+ maxPodsToEvictPerNode *uint
+ maxNoOfPodsToEvictPerNamespace *uint
+ pods []v1.Pod
+ expectedEvictedPodCount uint
+ nodeFit bool
+ nodes []*v1.Node
}{
{
description: "Maximum pods to evict - 0",
- maxPodsToEvictPerNode: 0,
pods: []v1.Pod{*p1, *p2, *p3, *p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict - 3",
- maxPodsToEvictPerNode: 3,
+ maxPodsToEvictPerNode: &uint3,
pods: []v1.Pod{*p1, *p2, *p3, *p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict (maxPodsToEvictPerNamespace=3) - 3",
maxNoOfPodsToEvictPerNamespace: &uint3,
pods: []v1.Pod{*p1, *p2, *p3, *p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Evict only 1 pod after sorting",
- maxPodsToEvictPerNode: 0,
pods: []v1.Pod{*p5, *p6, *p7},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
- maxPodsToEvictPerNode: 1,
+ maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p1, *nonEvictablePod},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
- maxPodsToEvictPerNode: 1,
+ maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p1, *nonEvictablePod},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Won't evict pods because node selectors don't match available nodes",
- maxPodsToEvictPerNode: 1,
+ maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p8, *nonEvictablePod},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
@@ -152,7 +161,7 @@ func TestPodAntiAffinity(t *testing.T) {
},
{
description: "Won't evict pods because only other node is not schedulable",
- maxPodsToEvictPerNode: 1,
+ maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p8, *nonEvictablePod},
nodes: []*v1.Node{node1, node3},
expectedEvictedPodCount: 0,
@@ -160,7 +169,6 @@ func TestPodAntiAffinity(t *testing.T) {
},
{
description: "No pod to evicted since all pod terminating",
- maxPodsToEvictPerNode: 0,
pods: []v1.Pod{*p9, *p10},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
@@ -182,6 +190,7 @@ func TestPodAntiAffinity(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
test.maxPodsToEvictPerNode,
test.maxNoOfPodsToEvictPerNamespace,
test.nodes,
false,
false,

View File

@@ -140,10 +140,9 @@ func TestPodLifeTime(t *testing.T) {
testCases := []struct {
description string
strategy api.DeschedulerStrategy
- maxPodsToEvictPerNode int
pods []v1.Pod
nodes []*v1.Node
- expectedEvictedPodCount int
+ expectedEvictedPodCount uint
ignorePvcPods bool
}{
{
@@ -154,7 +153,6 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
@@ -167,7 +165,6 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p3, *p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
@@ -180,7 +177,6 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p5, *p6},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
@@ -193,7 +189,6 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p7, *p8},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
@@ -209,7 +204,6 @@ func TestPodLifeTime(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p9, *p10},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
@@ -222,7 +216,6 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
@@ -236,7 +229,6 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
@@ -252,7 +244,6 @@ func TestPodLifeTime(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p12, *p13},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
@@ -268,7 +259,6 @@ func TestPodLifeTime(t *testing.T) {
},
},
},
- maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p14, *p15},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
@@ -285,7 +275,8 @@ func TestPodLifeTime(t *testing.T) {
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
- tc.maxPodsToEvictPerNode,
+ nil,
+ nil,
tc.nodes,
false,
false,

View File

@@ -114,89 +114,91 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
}
}
var uint3 uint = 3
tests := []struct {
- description string
- nodes []*v1.Node
- strategy api.DeschedulerStrategy
- expectedEvictedPodCount int
- maxPodsToEvictPerNode int
+ description string
+ nodes []*v1.Node
+ strategy api.DeschedulerStrategy
+ expectedEvictedPodCount uint
+ maxPodsToEvictPerNode *uint
+ maxNoOfPodsToEvictPerNamespace *uint
}{
{
description: "All pods have total restarts under threshold, no pod evictions",
strategy: createStrategy(true, true, 10000, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
- maxPodsToEvictPerNode: 0,
},
{
description: "Some pods have total restarts bigger than threshold",
strategy: createStrategy(true, true, 1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6,
- maxPodsToEvictPerNode: 0,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
strategy: createStrategy(true, true, 1*25, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6,
- maxPodsToEvictPerNode: 0,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
strategy: createStrategy(true, false, 1*25, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 5,
- maxPodsToEvictPerNode: 0,
},
{
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
strategy: createStrategy(true, true, 1*20, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6,
- maxPodsToEvictPerNode: 0,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
strategy: createStrategy(true, false, 1*20, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6,
- maxPodsToEvictPerNode: 0,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
strategy: createStrategy(true, true, 5*25+1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
- maxPodsToEvictPerNode: 0,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
strategy: createStrategy(true, false, 5*20+1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
- maxPodsToEvictPerNode: 0,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
strategy: createStrategy(true, true, 1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
- maxPodsToEvictPerNode: 3,
+ maxPodsToEvictPerNode: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
strategy: createStrategy(true, true, 1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
maxNoOfPodsToEvictPerNamespace: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tained, 0 pod evictions",
strategy: createStrategy(true, true, 1, true),
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
- maxPodsToEvictPerNode: 3,
+ maxPodsToEvictPerNode: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
strategy: createStrategy(true, true, 1, true),
nodes: []*v1.Node{node1, node3},
expectedEvictedPodCount: 0,
- maxPodsToEvictPerNode: 3,
+ maxPodsToEvictPerNode: &uint3,
},
}
@@ -212,6 +214,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
false,

View File

@@ -20,7 +20,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
testCases := []struct {
name string
pods []*v1.Pod
- expectedEvictedCount int
+ expectedEvictedCount uint
nodes []*v1.Node
strategy api.DeschedulerStrategy
namespaces []string
@@ -886,7 +886,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
fakeClient,
"v1",
false,
- 100,
+ nil,
+ nil,
tc.nodes,
false,
false,

View File

@@ -18,17 +18,18 @@ package e2e
import (
"context"
- "k8s.io/apimachinery/pkg/api/resource"
- "k8s.io/apimachinery/pkg/util/wait"
- clientset "k8s.io/client-go/kubernetes"
"strings"
"testing"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/wait"
+ clientset "k8s.io/client-go/kubernetes"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
@@ -87,7 +88,7 @@ func TestRemoveDuplicates(t *testing.T) {
description string
replicasNum int
beforeFunc func(deployment *appsv1.Deployment)
- expectedEvictedPodCount int
+ expectedEvictedPodCount uint
}{
{
description: "Evict Pod even Pods schedule to specific node",
@@ -143,7 +144,8 @@ func TestRemoveDuplicates(t *testing.T) {
clientSet,
evictionPolicyGroupVersion,
false,
- 0,
+ nil,
+ nil,
nodes,
true,
false,

View File

@@ -2,15 +2,16 @@ package e2e
import (
"context"
+ "strings"
+ "testing"
+ "time"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
- "strings"
- "testing"
- "time"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
@@ -34,7 +35,7 @@ func TestFailedPods(t *testing.T) {
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
testCases := map[string]struct {
- expectedEvictedCount int
+ expectedEvictedCount uint
strategyParams *deschedulerapi.StrategyParameters
}{
"test-failed-pods-nil-strategy": {

View File

@@ -166,7 +166,8 @@ func runPodLifetimeStrategy(
clientset,
evictionPolicyGroupVersion,
false,
- 0,
+ nil,
+ nil,
nodes,
false,
evictCritical,
@@ -1290,7 +1291,8 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, nodes []*
clientSet,
evictionPolicyGroupVersion,
false,
- 0,
+ nil,
+ nil,
nodes,
true,
false,

View File

@@ -19,6 +19,10 @@ package e2e
import (
"context"
"fmt"
+ "strings"
+ "testing"
+ "time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -28,9 +32,6 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
- "strings"
- "testing"
- "time"
)
func TestTooManyRestarts(t *testing.T) {
@@ -108,7 +109,7 @@ func TestTooManyRestarts(t *testing.T) {
name string
podRestartThreshold int32
includingInitContainers bool
- expectedEvictedPodCount int
+ expectedEvictedPodCount uint
}{
{
name: "test-no-evictions",
@@ -133,7 +134,8 @@ func TestTooManyRestarts(t *testing.T) {
clientSet,
evictionPolicyGroupVersion,
false,
- 0,
+ nil,
+ nil,
nodes,
true,
false,