Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-25 20:59:28 +01:00)

Commit: Drop deprecated flags
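Summary: the change removes the deprecated --node-selector, --max-pods-to-evict-per-node, and --evict-local-storage-pods command-line flags, leaving the descheduler policy file as the only way to configure those settings, and switches MaxNoOfPodsToEvictPerNode from int to *uint across the API types, the generated conversion and deepcopy code, the pod evictor, and the tests, so that a nil value now expresses "no per-node eviction limit".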
@@ -85,12 +85,6 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
 	fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
 	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
-	// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
-	fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
-	// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
-	fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler")
-	// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
-	fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "DEPRECATED: enables evicting pods using local storage by descheduler")
 	fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")

 	rs.SecureServing.AddFlags(fs)
@@ -42,7 +42,7 @@ type DeschedulerPolicy struct {
 	IgnorePVCPods *bool

 	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
-	MaxNoOfPodsToEvictPerNode *int
+	MaxNoOfPodsToEvictPerNode *uint
 }

 type StrategyName string
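Note (illustrative, not part of the commit): a minimal standalone sketch of the semantics this type change enables. With *uint, a nil field cleanly distinguishes "no limit configured" from an explicit cap, and negative limits become unrepresentable; the names below are assumptions for illustration only.

	package main

	import "fmt"

	// effectiveLimit mimics how a *uint limit is interpreted:
	// nil means "unlimited", any non-nil value is a hard cap.
	func effectiveLimit(max *uint, evictedSoFar uint) bool {
		if max == nil {
			return true // no per-node limit configured
		}
		return evictedSoFar+1 <= *max
	}

	func main() {
		var limit uint = 2
		fmt.Println(effectiveLimit(nil, 100))  // true: unlimited
		fmt.Println(effectiveLimit(&limit, 1)) // true: second eviction still allowed
		fmt.Println(effectiveLimit(&limit, 2)) // false: cap reached
	}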
@@ -136,7 +136,13 @@ func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
 	out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
 	out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
 	out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
-	out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
+	if in.MaxNoOfPodsToEvictPerNode != nil {
+		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
+		*out = new(uint)
+		**out = uint(**in)
+	} else {
+		out.MaxNoOfPodsToEvictPerNode = nil
+	}
 	return nil
 }
@@ -151,7 +157,13 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.Des
 	out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
 	out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
 	out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
-	out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
+	if in.MaxNoOfPodsToEvictPerNode != nil {
+		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
+		*out = new(int)
+		**out = int(**in)
+	} else {
+		out.MaxNoOfPodsToEvictPerNode = nil
+	}
 	return nil
 }
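Note (illustrative, not part of the commit): the removed lines cast through unsafe.Pointer, which is only sound when source and destination types are layout-compatible and value-compatible; reinterpreting a *int as *uint would silently turn a negative value into a huge unsigned one. The generated code therefore falls back to the element-wise copy pattern sketched below.

	package main

	import "fmt"

	// convert copies a *int into a *uint element by element, the pattern
	// the generated conversion code uses when a blind pointer cast would
	// be unsound.
	func convert(in *int) *uint {
		if in == nil {
			return nil
		}
		out := new(uint)
		*out = uint(*in)
		return out
	}

	func main() {
		five := 5
		fmt.Println(*convert(&five)) // 5
		fmt.Println(convert(nil))    // <nil>
	}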
@@ -59,7 +59,7 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 	}
 	if in.MaxNoOfPodsToEvictPerNode != nil {
 		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
-		*out = new(int)
+		*out = new(uint)
 		**out = **in
 	}
 	return
@@ -17,9 +17,10 @@ limitations under the License.
 package v1alpha1

 import (
-	componentbaseconfig "k8s.io/component-base/config"
 	"time"
+
+	componentbaseconfig "k8s.io/component-base/config"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -19,14 +19,13 @@ package descheduler
 import (
 	"context"
 	"fmt"
-	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"

 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"

-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/informers"
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/metrics"
 	"sigs.k8s.io/descheduler/pkg/api"
@@ -35,6 +34,7 @@ import (
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
+	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
 )

 func Run(rs *options.DeschedulerServer) error {
@@ -86,12 +86,12 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 		"RemoveFailedPods": strategies.RemoveFailedPods,
 	}

-	nodeSelector := rs.NodeSelector
+	var nodeSelector string
 	if deschedulerPolicy.NodeSelector != nil {
 		nodeSelector = *deschedulerPolicy.NodeSelector
 	}

-	evictLocalStoragePods := rs.EvictLocalStoragePods
+	var evictLocalStoragePods bool
 	if deschedulerPolicy.EvictLocalStoragePods != nil {
 		evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
 	}
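Note (illustrative): with the flags gone, these settings no longer inherit a CLI default; each variable starts at its zero value and is only overridden when the policy sets the corresponding pointer field. A minimal sketch of that defaulting pattern, with hypothetical names:

	package main

	import "fmt"

	type policy struct {
		NodeSelector *string // nil: not set in the policy file
	}

	func main() {
		var nodeSelector string // zero value; no flag fallback anymore
		p := policy{}
		if p.NodeSelector != nil {
			nodeSelector = *p.NodeSelector
		}
		fmt.Printf("selector: %q\n", nodeSelector) // selector: ""
	}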
@@ -109,11 +109,6 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 		ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
 	}

-	maxNoOfPodsToEvictPerNode := rs.MaxNoOfPodsToEvictPerNode
-	if deschedulerPolicy.MaxNoOfPodsToEvictPerNode != nil {
-		maxNoOfPodsToEvictPerNode = *deschedulerPolicy.MaxNoOfPodsToEvictPerNode
-	}
-
 	wait.Until(func() {
 		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
 		if err != nil {
@@ -132,7 +127,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 			rs.Client,
 			evictionPolicyGroupVersion,
 			rs.DryRun,
-			maxNoOfPodsToEvictPerNode,
+			deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
 			nodes,
 			evictLocalStoragePods,
 			evictSystemCriticalPods,
@@ -45,14 +45,14 @@ const (
 )

 // nodePodEvictedCount keeps count of pods evicted on node
-type nodePodEvictedCount map[*v1.Node]int
+type nodePodEvictedCount map[*v1.Node]uint

 type PodEvictor struct {
 	client clientset.Interface
 	nodes []*v1.Node
 	policyGroupVersion string
 	dryRun bool
-	maxPodsToEvictPerNode int
+	maxPodsToEvictPerNode *uint
 	nodepodCount nodePodEvictedCount
 	evictLocalStoragePods bool
 	evictSystemCriticalPods bool
@@ -63,7 +63,7 @@ func NewPodEvictor(
 	client clientset.Interface,
 	policyGroupVersion string,
 	dryRun bool,
-	maxPodsToEvictPerNode int,
+	maxPodsToEvictPerNode *uint,
 	nodes []*v1.Node,
 	evictLocalStoragePods bool,
 	evictSystemCriticalPods bool,
@@ -89,13 +89,13 @@ func NewPodEvictor(
 }

 // NodeEvicted gives a number of pods evicted for node
-func (pe *PodEvictor) NodeEvicted(node *v1.Node) int {
+func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
 	return pe.nodepodCount[node]
 }

 // TotalEvicted gives a number of pods evicted through all nodes
-func (pe *PodEvictor) TotalEvicted() int {
-	var total int
+func (pe *PodEvictor) TotalEvicted() uint {
+	var total uint
 	for _, count := range pe.nodepodCount {
 		total += count
 	}
@@ -110,9 +110,9 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
 	if len(reasons) > 0 {
 		reason += " (" + strings.Join(reasons, ", ") + ")"
 	}
-	if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode {
+	if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[node]+1 > *pe.maxPodsToEvictPerNode {
 		metrics.PodsEvicted.With(map[string]string{"result": "maximum number reached", "strategy": strategy, "namespace": pod.Namespace}).Inc()
-		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name)
+		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", *pe.maxPodsToEvictPerNode, node.Name)
 	}

 	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
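Note (illustrative): this hunk also changes behavior at the boundary. With int, a limit of 0 meant "unlimited" because of the max > 0 guard; with *uint, nil means unlimited and an explicit 0 becomes a real cap of zero evictions. A standalone comparison, outside the descheduler codebase:

	package main

	import "fmt"

	// allowOld reproduces the old guard: a 0 limit disabled the check.
	func allowOld(max int, count int) bool { return !(max > 0 && count+1 > max) }

	// allowNew reproduces the new guard: only nil disables the check.
	func allowNew(max *uint, count uint) bool { return !(max != nil && count+1 > *max) }

	func main() {
		var zero uint = 0
		fmt.Println(allowOld(0, 10))    // true: 0 used to mean "no limit"
		fmt.Println(allowNew(nil, 10))  // true: nil now means "no limit"
		fmt.Println(allowNew(&zero, 0)) // false: an explicit 0 now blocks all evictions
	}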
@@ -173,15 +173,13 @@ func TestFindDuplicatePods(t *testing.T) {

 	testCases := []struct {
 		description string
-		maxPodsToEvictPerNode int
 		pods []v1.Pod
 		nodes []*v1.Node
-		expectedEvictedPodCount int
+		expectedEvictedPodCount uint
 		strategy api.DeschedulerStrategy
 	}{
 		{
 			description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2, *p3},
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 1,
@@ -189,7 +187,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2, *p3},
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
@@ -197,7 +194,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p8, *p9, *p10},
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 1,
@@ -205,7 +201,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 2,
@@ -213,7 +208,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
-			maxPodsToEvictPerNode: 2,
 			pods: []v1.Pod{*p4, *p5, *p6, *p7},
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
@@ -221,7 +215,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Test all Pods: 4 should be evicted.",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 2,
@@ -229,7 +222,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Pods with the same owner but different images should not be evicted",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p11, *p12},
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
@@ -237,7 +229,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Pods with multiple containers should not match themselves",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p13},
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
@@ -245,7 +236,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p11, *p13},
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
@@ -253,7 +243,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2, *p3},
 			nodes: []*v1.Node{node1, node3},
 			expectedEvictedPodCount: 0,
@@ -261,7 +250,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p15, *p16, *p17},
 			nodes: []*v1.Node{node1, node4},
 			expectedEvictedPodCount: 0,
@@ -269,7 +257,6 @@ func TestFindDuplicatePods(t *testing.T) {
 		},
 		{
 			description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2, *p3},
 			nodes: []*v1.Node{node1, node5},
 			expectedEvictedPodCount: 0,
@@ -287,7 +274,7 @@ func TestFindDuplicatePods(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			testCase.maxPodsToEvictPerNode,
+			nil,
 			testCase.nodes,
 			false,
 			false,
@@ -432,10 +419,9 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {

 	testCases := []struct {
 		description string
-		maxPodsToEvictPerNode int
 		pods []v1.Pod
 		nodes []*v1.Node
-		expectedEvictedPodCount int
+		expectedEvictedPodCount uint
 		strategy api.DeschedulerStrategy
 	}{
 		{
@@ -695,7 +681,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 			fakeClient,
 			policyv1.SchemeGroupVersion.String(),
 			false,
-			testCase.maxPodsToEvictPerNode,
+			nil,
 			testCase.nodes,
 			false,
 			false,
@@ -2,9 +2,10 @@ package strategies

 import (
 	"context"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"testing"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	v1 "k8s.io/api/core/v1"
 	policyv1 "k8s.io/api/policy/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -41,7 +42,7 @@ func TestRemoveFailedPods(t *testing.T) {
 		description string
 		nodes []*v1.Node
 		strategy api.DeschedulerStrategy
-		expectedEvictedPodCount int
+		expectedEvictedPodCount uint
 		pods []v1.Pod
 	}{
 		{
@@ -220,7 +221,7 @@ func TestRemoveFailedPods(t *testing.T) {
 			fakeClient,
 			policyv1.SchemeGroupVersion.String(),
 			false,
-			100,
+			nil,
 			tc.nodes,
 			false,
 			false,
@@ -102,13 +102,14 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 		}
 	}

+	var uint1 uint = 1
 	tests := []struct {
 		description string
 		nodes []*v1.Node
 		pods []v1.Pod
 		strategy api.DeschedulerStrategy
-		expectedEvictedPodCount int
-		maxPodsToEvictPerNode int
+		expectedEvictedPodCount uint
+		maxPodsToEvictPerNode *uint
 	}{
 		{
 			description: "Invalid strategy type, should not evict any pods",
@@ -123,7 +124,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			expectedEvictedPodCount: 0,
 			pods: addPodsToNode(nodeWithoutLabels, nil),
 			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Pod is correctly scheduled on node, no eviction expected",
@@ -131,7 +131,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			expectedEvictedPodCount: 0,
 			pods: addPodsToNode(nodeWithLabels, nil),
 			nodes: []*v1.Node{nodeWithLabels},
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
@@ -139,7 +138,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods: addPodsToNode(nodeWithoutLabels, nil),
 			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
@@ -147,7 +145,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods: addPodsToNode(nodeWithoutLabels, nil),
 			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-			maxPodsToEvictPerNode: 1,
+			maxPodsToEvictPerNode: &uint1,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminting",
@@ -155,7 +153,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
 			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-			maxPodsToEvictPerNode: 1,
+			maxPodsToEvictPerNode: &uint1,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
@@ -163,7 +161,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
 			pods: addPodsToNode(nodeWithoutLabels, nil),
 			nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, and node where pod fits is available, should evict",
@@ -171,7 +168,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
 			pods: addPodsToNode(nodeWithoutLabels, nil),
 			nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
-			maxPodsToEvictPerNode: 1,
+			maxPodsToEvictPerNode: &uint1,
 		},
 	}
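Note (illustrative): the var uint1 uint = 1 helper above exists because Go cannot take the address of a constant; &uint(1) does not compile, so the tests first declare an addressable variable. Since Go 1.18 a generic helper can do the same, shown here only as an aside about the pattern, not something this commit uses:

	package main

	import "fmt"

	// ptr returns a pointer to any value by copying it into an
	// addressable local; the test code here predates generics.
	func ptr[T any](v T) *T { return &v }

	func main() {
		var uint1 uint = 1 // the pattern used in these tests
		a := &uint1
		b := ptr[uint](1) // equivalent result with a generic helper
		fmt.Println(*a, *b) // 1 1
	}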
@@ -116,14 +116,16 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 		"datacenter": "west",
 	}

+	var uint1 uint = 1
+
 	tests := []struct {
 		description string
 		nodes []*v1.Node
 		pods []v1.Pod
 		evictLocalStoragePods bool
 		evictSystemCriticalPods bool
-		maxPodsToEvictPerNode int
-		expectedEvictedPodCount int
+		maxPodsToEvictPerNode *uint
+		expectedEvictedPodCount uint
 		nodeFit bool
 	}{
@@ -133,7 +135,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			nodes: []*v1.Node{node1},
 			evictLocalStoragePods: false,
 			evictSystemCriticalPods: false,
-			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1, //p2 gets evicted
 		},
 		{
@@ -142,7 +143,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			nodes: []*v1.Node{node1},
 			evictLocalStoragePods: false,
 			evictSystemCriticalPods: false,
-			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1, //p4 gets evicted
 		},
 		{
@@ -151,7 +151,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			nodes: []*v1.Node{node1},
 			evictLocalStoragePods: false,
 			evictSystemCriticalPods: false,
-			maxPodsToEvictPerNode: 1,
+			maxPodsToEvictPerNode: &uint1,
 			expectedEvictedPodCount: 1, //p5 or p6 gets evicted
 		},
 		{
@@ -160,7 +160,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			nodes: []*v1.Node{node2},
 			evictLocalStoragePods: false,
 			evictSystemCriticalPods: false,
-			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 0, //nothing is evicted
 		},
 		{
@@ -169,7 +168,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			nodes: []*v1.Node{node2},
 			evictLocalStoragePods: true,
 			evictSystemCriticalPods: false,
-			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1, //p9 gets evicted
 		},
 		{
@@ -178,7 +176,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			nodes: []*v1.Node{node2},
 			evictLocalStoragePods: false,
 			evictSystemCriticalPods: false,
-			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1, //p11 gets evicted
 		},
 		{
@@ -187,7 +184,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			nodes: []*v1.Node{node2},
 			evictLocalStoragePods: false,
 			evictSystemCriticalPods: true,
-			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 2, //p2 and p7 are evicted
 		},
 		{
@@ -196,7 +192,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			nodes: []*v1.Node{node1, node2},
 			evictLocalStoragePods: false,
 			evictSystemCriticalPods: false,
-			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 0, //p2 gets evicted
 			nodeFit: true,
 		},
@@ -206,7 +201,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			nodes: []*v1.Node{node1, node3},
 			evictLocalStoragePods: false,
 			evictSystemCriticalPods: false,
-			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 0, //p2 gets evicted
 			nodeFit: true,
 		},
@@ -216,7 +210,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			nodes: []*v1.Node{node1, node4},
 			evictLocalStoragePods: false,
 			evictSystemCriticalPods: false,
-			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 0, //p2 gets evicted
 			nodeFit: true,
 		},
@@ -44,13 +44,12 @@ func TestHighNodeUtilization(t *testing.T) {
 	nodeSelectorValue := "west"

 	testCases := []struct {
-		name                  string
-		thresholds            api.ResourceThresholds
-		nodes                 map[string]*v1.Node
-		pods                  map[string]*v1.PodList
-		maxPodsToEvictPerNode int
-		expectedPodsEvicted   int
-		evictedPods           []string
+		name                string
+		thresholds          api.ResourceThresholds
+		nodes               map[string]*v1.Node
+		pods                map[string]*v1.PodList
+		expectedPodsEvicted uint
+		evictedPods         []string
 	}{
 		{
 			name: "no node below threshold usage",
@@ -89,8 +88,7 @@ func TestHighNodeUtilization(t *testing.T) {
 				},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   0,
+			expectedPodsEvicted: 0,
 		},
 		{
 			name: "no evictable pods",
@@ -147,8 +145,7 @@ func TestHighNodeUtilization(t *testing.T) {
 				},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   0,
+			expectedPodsEvicted: 0,
 		},
 		{
 			name: "no node to schedule evicted pods",
@@ -182,8 +179,7 @@ func TestHighNodeUtilization(t *testing.T) {
 				},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   0,
+			expectedPodsEvicted: 0,
 		},
 		{
 			name: "without priorities",
@@ -224,9 +220,8 @@ func TestHighNodeUtilization(t *testing.T) {
 				},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   2,
-			evictedPods:           []string{"p1", "p7"},
+			expectedPodsEvicted: 2,
+			evictedPods:         []string{"p1", "p7"},
 		},
 		{
 			name: "without priorities stop when resource capacity is depleted",
@@ -260,8 +255,7 @@ func TestHighNodeUtilization(t *testing.T) {
 				},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   1,
+			expectedPodsEvicted: 1,
 		},
 		{
 			name: "with priorities",
@@ -302,9 +296,8 @@ func TestHighNodeUtilization(t *testing.T) {
 				},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   1,
-			evictedPods:           []string{"p1"},
+			expectedPodsEvicted: 1,
+			evictedPods:         []string{"p1"},
 		},
 		{
 			name: "without priorities evicting best-effort pods only",
@@ -342,9 +335,8 @@ func TestHighNodeUtilization(t *testing.T) {
 				Items: []v1.Pod{},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   1,
-			evictedPods:           []string{"p1"},
+			expectedPodsEvicted: 1,
+			evictedPods:         []string{"p1"},
 		},
 		{
 			name: "with extended resource",
@@ -403,9 +395,8 @@ func TestHighNodeUtilization(t *testing.T) {
 				Items: []v1.Pod{},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   2,
-			evictedPods:           []string{"p1", "p2"},
+			expectedPodsEvicted: 2,
+			evictedPods:         []string{"p1", "p2"},
 		},
 		{
 			name: "with extended resource in some of nodes",
@@ -446,8 +437,7 @@ func TestHighNodeUtilization(t *testing.T) {
 				Items: []v1.Pod{},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   0,
+			expectedPodsEvicted: 0,
 		},
 		{
 			name: "Other node match pod node selector",
@@ -484,8 +474,7 @@ func TestHighNodeUtilization(t *testing.T) {
 				},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   1,
+			expectedPodsEvicted: 1,
 		},
 		{
 			name: "Other node does not match pod node selector",
@@ -518,8 +507,7 @@ func TestHighNodeUtilization(t *testing.T) {
 				},
 				},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   0,
+			expectedPodsEvicted: 0,
 		},
 	}

@@ -577,7 +565,7 @@ func TestHighNodeUtilization(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			test.maxPodsToEvictPerNode,
+			nil,
 			nodes,
 			false,
 			false,
@@ -597,7 +585,7 @@ func TestHighNodeUtilization(t *testing.T) {

 		podsEvicted := podEvictor.TotalEvicted()
 		if test.expectedPodsEvicted != podsEvicted {
-			t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
+			t.Errorf("Expected %v pods to be evicted but %v got evicted", test.expectedPodsEvicted, podsEvicted)
 		}
 		if evictionFailed {
 			t.Errorf("Pod evictions failed unexpectedly")
@@ -710,7 +698,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 		name string
 		nodes []*v1.Node
 		pods []*v1.Pod
-		evictionsExpected int
+		evictionsExpected uint
 	}{
 		{
 			name: "No taints",
@@ -767,7 +755,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 			fakeClient,
 			"policy/v1",
 			false,
-			item.evictionsExpected,
+			&item.evictionsExpected,
 			item.nodes,
 			false,
 			false,
@@ -50,8 +50,7 @@ func TestLowNodeUtilization(t *testing.T) {
 		thresholds, targetThresholds api.ResourceThresholds
 		nodes map[string]*v1.Node
 		pods map[string]*v1.PodList
-		maxPodsToEvictPerNode int
-		expectedPodsEvicted int
+		expectedPodsEvicted uint
 		evictedPods []string
 	}{
 		{
@@ -108,8 +107,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n3NodeName: {},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   0,
+			expectedPodsEvicted: 0,
 		},
 		{
 			name: "without priorities",
@@ -167,8 +165,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n3NodeName: {},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   4,
+			expectedPodsEvicted: 4,
 		},
 		{
 			name: "without priorities stop when cpu capacity is depleted",
@@ -226,7 +223,6 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n3NodeName: {},
 			},
-			maxPodsToEvictPerNode: 0,
 			// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before cpu is depleted
 			expectedPodsEvicted: 3,
 		},
@@ -305,8 +301,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n3NodeName: {},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   4,
+			expectedPodsEvicted: 4,
 		},
 		{
 			name: "without priorities evicting best-effort pods only",
@@ -381,9 +376,8 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n3NodeName: {},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   4,
-			evictedPods:           []string{"p1", "p2", "p4", "p5"},
+			expectedPodsEvicted: 4,
+			evictedPods:         []string{"p1", "p2", "p4", "p5"},
 		},
 		{
 			name: "with extended resource",
@@ -469,7 +463,6 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n3NodeName: {},
 			},
-			maxPodsToEvictPerNode: 0,
 			// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before extended resource is depleted
 			expectedPodsEvicted: 3,
 		},
@@ -507,7 +500,6 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n3NodeName: {},
 			},
-			maxPodsToEvictPerNode: 0,
 			// 0 pods available for eviction because there's no enough extended resource in node2
 			expectedPodsEvicted: 0,
 		},
@@ -561,8 +553,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n2NodeName: {},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   0,
+			expectedPodsEvicted: 0,
 		},
 		{
 			name: "without priorities, but only other node doesn't match pod node selector for p4 and p5",
@@ -630,8 +621,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			n2NodeName: {},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   3,
+			expectedPodsEvicted: 3,
 		},
 		{
 			name: "without priorities, but only other node doesn't match pod node affinity for p4 and p5",
@@ -729,8 +719,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				*test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
 			}},
 			},
-			maxPodsToEvictPerNode: 0,
-			expectedPodsEvicted:   3,
+			expectedPodsEvicted: 3,
 		},
 	}

@@ -788,7 +777,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			fakeClient,
 			policyv1.SchemeGroupVersion.String(),
 			false,
-			test.maxPodsToEvictPerNode,
+			nil,
 			nodes,
 			false,
 			false,
@@ -809,7 +798,7 @@ func TestLowNodeUtilization(t *testing.T) {

 		podsEvicted := podEvictor.TotalEvicted()
 		if test.expectedPodsEvicted != podsEvicted {
-			t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
+			t.Errorf("Expected %v pods to be evicted but %v got evicted", test.expectedPodsEvicted, podsEvicted)
 		}
 		if evictionFailed {
 			t.Errorf("Pod evictions failed unexpectedly")
@@ -1013,7 +1002,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 		name string
 		nodes []*v1.Node
 		pods []*v1.Pod
-		evictionsExpected int
+		evictionsExpected uint
 	}{
 		{
 			name: "No taints",
@@ -1088,7 +1077,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 			fakeClient,
 			policyv1.SchemeGroupVersion.String(),
 			false,
-			item.evictionsExpected,
+			&item.evictionsExpected,
 			item.nodes,
 			false,
 			false,
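Note (illustrative): the %#v to %v change in the failure messages above is not cosmetic once the counters are uint values: %#v prints the Go-syntax representation, which renders unsigned integers in hexadecimal.

	package main

	import "fmt"

	func main() {
		var evicted uint = 4
		fmt.Printf("%#v\n", evicted) // 0x4 (Go-syntax form of a uint)
		fmt.Printf("%v\n", evicted)  // 4
	}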
@@ -99,52 +99,53 @@ func TestPodAntiAffinity(t *testing.T) {
 		"datacenter": "west",
 	}

+	var uint1 uint = 1
+	var uint3 uint = 3
+
 	tests := []struct {
 		description string
-		maxPodsToEvictPerNode int
+		maxPodsToEvictPerNode *uint
 		pods []v1.Pod
-		expectedEvictedPodCount int
+		expectedEvictedPodCount uint
 		nodeFit bool
 		nodes []*v1.Node
 	}{
 		{
 			description: "Maximum pods to evict - 0",
-			maxPodsToEvictPerNode: 0,
 			pods: []v1.Pod{*p1, *p2, *p3, *p4},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 3,
 		},
 		{
 			description: "Maximum pods to evict - 3",
-			maxPodsToEvictPerNode: 3,
+			maxPodsToEvictPerNode: &uint3,
 			pods: []v1.Pod{*p1, *p2, *p3, *p4},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 3,
 		},
 		{
 			description: "Evict only 1 pod after sorting",
-			maxPodsToEvictPerNode: 0,
 			pods: []v1.Pod{*p5, *p6, *p7},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 1,
 		},
 		{
 			description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
-			maxPodsToEvictPerNode: 1,
+			maxPodsToEvictPerNode: &uint1,
 			pods: []v1.Pod{*p1, *nonEvictablePod},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 1,
 		},
 		{
 			description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
-			maxPodsToEvictPerNode: 1,
+			maxPodsToEvictPerNode: &uint1,
 			pods: []v1.Pod{*p1, *nonEvictablePod},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 1,
 		},
 		{
 			description: "Won't evict pods because node selectors don't match available nodes",
-			maxPodsToEvictPerNode: 1,
+			maxPodsToEvictPerNode: &uint1,
 			pods: []v1.Pod{*p8, *nonEvictablePod},
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
@@ -152,7 +153,7 @@ func TestPodAntiAffinity(t *testing.T) {
 		},
 		{
 			description: "Won't evict pods because only other node is not schedulable",
-			maxPodsToEvictPerNode: 1,
+			maxPodsToEvictPerNode: &uint1,
 			pods: []v1.Pod{*p8, *nonEvictablePod},
 			nodes: []*v1.Node{node1, node3},
 			expectedEvictedPodCount: 0,
@@ -160,7 +161,6 @@ func TestPodAntiAffinity(t *testing.T) {
 		},
 		{
 			description: "No pod to evicted since all pod terminating",
-			maxPodsToEvictPerNode: 0,
 			pods: []v1.Pod{*p9, *p10},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 0,
@@ -140,10 +140,9 @@ func TestPodLifeTime(t *testing.T) {
 	testCases := []struct {
 		description string
 		strategy api.DeschedulerStrategy
-		maxPodsToEvictPerNode int
 		pods []v1.Pod
 		nodes []*v1.Node
-		expectedEvictedPodCount int
+		expectedEvictedPodCount uint
 		ignorePvcPods bool
 	}{
 		{
@@ -154,7 +153,6 @@ func TestPodLifeTime(t *testing.T) {
 					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
 				},
 			},
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 1,
@@ -167,7 +165,6 @@ func TestPodLifeTime(t *testing.T) {
 					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
 				},
 			},
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p3, *p4},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 0,
@@ -180,7 +177,6 @@ func TestPodLifeTime(t *testing.T) {
 					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
 				},
 			},
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p5, *p6},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 1,
@@ -193,7 +189,6 @@ func TestPodLifeTime(t *testing.T) {
 					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
 				},
 			},
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p7, *p8},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 0,
@@ -209,7 +204,6 @@ func TestPodLifeTime(t *testing.T) {
 					},
 				},
 			},
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p9, *p10},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 1,
@@ -222,7 +216,6 @@ func TestPodLifeTime(t *testing.T) {
 					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
 				},
 			},
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p11},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 0,
@@ -236,7 +229,6 @@ func TestPodLifeTime(t *testing.T) {
 					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
 				},
 			},
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p11},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 1,
@@ -252,7 +244,6 @@ func TestPodLifeTime(t *testing.T) {
 					},
 				},
 			},
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p12, *p13},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 1,
@@ -268,7 +259,6 @@ func TestPodLifeTime(t *testing.T) {
 					},
 				},
 			},
-			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p14, *p15},
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 0,
@@ -285,7 +275,7 @@ func TestPodLifeTime(t *testing.T) {
 			fakeClient,
 			policyv1.SchemeGroupVersion.String(),
 			false,
-			tc.maxPodsToEvictPerNode,
+			nil,
 			tc.nodes,
 			false,
 			false,
@@ -114,89 +114,83 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 		}
 	}

+	var uint3 uint = 3
+
 	tests := []struct {
 		description string
 		nodes []*v1.Node
 		strategy api.DeschedulerStrategy
-		expectedEvictedPodCount int
-		maxPodsToEvictPerNode int
+		expectedEvictedPodCount uint
+		maxPodsToEvictPerNode *uint
 	}{
 		{
 			description: "All pods have total restarts under threshold, no pod evictions",
 			strategy: createStrategy(true, true, 10000, false),
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 0,
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Some pods have total restarts bigger than threshold",
 			strategy: createStrategy(true, true, 1, false),
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 6,
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
 			strategy: createStrategy(true, true, 1*25, false),
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 6,
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
 			strategy: createStrategy(true, false, 1*25, false),
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 5,
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
 			strategy: createStrategy(true, true, 1*20, false),
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 6,
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
 			strategy: createStrategy(true, false, 1*20, false),
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 6,
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
 			strategy: createStrategy(true, true, 5*25+1, false),
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 1,
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
 			strategy: createStrategy(true, false, 5*20+1, false),
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 1,
-			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
 			strategy: createStrategy(true, true, 1, false),
 			nodes: []*v1.Node{node1},
 			expectedEvictedPodCount: 3,
-			maxPodsToEvictPerNode: 3,
+			maxPodsToEvictPerNode: &uint3,
 		},
 		{
 			description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tained, 0 pod evictions",
 			strategy: createStrategy(true, true, 1, true),
 			nodes: []*v1.Node{node1, node2},
 			expectedEvictedPodCount: 0,
-			maxPodsToEvictPerNode: 3,
+			maxPodsToEvictPerNode: &uint3,
 		},
 		{
 			description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
 			strategy: createStrategy(true, true, 1, true),
 			nodes: []*v1.Node{node1, node3},
 			expectedEvictedPodCount: 0,
-			maxPodsToEvictPerNode: 3,
+			maxPodsToEvictPerNode: &uint3,
 		},
 	}
@@ -20,7 +20,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 	testCases := []struct {
 		name string
 		pods []*v1.Pod
-		expectedEvictedCount int
+		expectedEvictedCount uint
 		nodes []*v1.Node
 		strategy api.DeschedulerStrategy
 		namespaces []string
@@ -886,7 +886,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			100,
+			nil,
 			tc.nodes,
 			false,
 			false,
@@ -18,17 +18,18 @@ package e2e

 import (
 	"context"
-	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/apimachinery/pkg/util/wait"
-	clientset "k8s.io/client-go/kubernetes"
 	"strings"
 	"testing"
 	"time"

 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/wait"
+	clientset "k8s.io/client-go/kubernetes"
+
 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
@@ -87,7 +88,7 @@ func TestRemoveDuplicates(t *testing.T) {
 		description string
 		replicasNum int
 		beforeFunc func(deployment *appsv1.Deployment)
-		expectedEvictedPodCount int
+		expectedEvictedPodCount uint
 	}{
 		{
 			description: "Evict Pod even Pods schedule to specific node",
@@ -143,7 +144,7 @@ func TestRemoveDuplicates(t *testing.T) {
 			clientSet,
 			evictionPolicyGroupVersion,
 			false,
-			0,
+			nil,
 			nodes,
 			true,
 			false,
@@ -2,15 +2,16 @@ package e2e

 import (
 	"context"
+	"strings"
+	"testing"
+	"time"
+
 	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	"strings"
-	"testing"
-	"time"

 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
@@ -34,7 +35,7 @@ func TestFailedPods(t *testing.T) {
 	}
 	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
 	testCases := map[string]struct {
-		expectedEvictedCount int
+		expectedEvictedCount uint
 		strategyParams *deschedulerapi.StrategyParameters
 	}{
 		"test-failed-pods-nil-strategy": {

@@ -166,7 +166,7 @@ func runPodLifetimeStrategy(
 		clientset,
 		evictionPolicyGroupVersion,
 		false,
-		0,
+		nil,
 		nodes,
 		false,
 		evictCritical,
@@ -1290,7 +1290,7 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, nodes []*
 		clientSet,
 		evictionPolicyGroupVersion,
 		false,
-		0,
+		nil,
 		nodes,
 		true,
 		false,
@@ -19,6 +19,10 @@ package e2e
 import (
 	"context"
 	"fmt"
+	"strings"
+	"testing"
+	"time"
+
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -28,9 +32,6 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
-	"strings"
-	"testing"
-	"time"
 )

 func TestTooManyRestarts(t *testing.T) {
@@ -108,7 +109,7 @@ func TestTooManyRestarts(t *testing.T) {
 		name string
 		podRestartThreshold int32
 		includingInitContainers bool
-		expectedEvictedPodCount int
+		expectedEvictedPodCount uint
 	}{
 		{
 			name: "test-no-evictions",
@@ -133,7 +134,7 @@ func TestTooManyRestarts(t *testing.T) {
 			clientSet,
 			evictionPolicyGroupVersion,
 			false,
-			0,
+			nil,
 			nodes,
 			true,
 			false,