mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Merge pull request #332 from lixiang233/fix_test_struct_lownodeutilization

Add maxPodsToEvictPerNode to LowNodeUtilization testcase struct
Authored by Kubernetes Prow Robot on 2020-07-09 01:22:02 -07:00, committed by GitHub.
8 changed files with 71 additions and 68 deletions
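For orientation, a minimal sketch of the renamed constructor parameter in use. The argument order and signature are confirmed by the diffs below; the import paths and fixtures are assumptions, not taken from this page:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	// Assumed import path for the package changed in this PR.
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

func main() {
	fakeClient := fake.NewSimpleClientset()
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}} // placeholder fixture

	// Same argument order as the call sites updated below; only the fourth
	// parameter is renamed, from maxPodsToEvict to maxPodsToEvictPerNode.
	_ = evictions.NewPodEvictor(
		fakeClient,       // clientset.Interface
		"v1",             // policyGroupVersion
		false,            // dryRun
		5,                // maxPodsToEvictPerNode
		[]*v1.Node{node}, // nodes
		false,            // evictLocalStoragePods
	)
}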

View File

@@ -48,7 +48,7 @@ type PodEvictor struct {
 	client                clientset.Interface
 	policyGroupVersion    string
 	dryRun                bool
-	maxPodsToEvict        int
+	maxPodsToEvictPerNode int
 	nodepodCount          nodePodEvictedCount
 	evictLocalStoragePods bool
 }
@@ -57,7 +57,7 @@ func NewPodEvictor(
 	client clientset.Interface,
 	policyGroupVersion string,
 	dryRun bool,
-	maxPodsToEvict int,
+	maxPodsToEvictPerNode int,
 	nodes []*v1.Node,
 	evictLocalStoragePods bool,
 ) *PodEvictor {
@@ -71,7 +71,7 @@ func NewPodEvictor(
 		client:                client,
 		policyGroupVersion:    policyGroupVersion,
 		dryRun:                dryRun,
-		maxPodsToEvict:        maxPodsToEvict,
+		maxPodsToEvictPerNode: maxPodsToEvictPerNode,
 		nodepodCount:          nodePodCount,
 		evictLocalStoragePods: evictLocalStoragePods,
 	}
@@ -123,15 +123,15 @@ func (pe *PodEvictor) TotalEvicted() int {
 }
 
 // EvictPod returns non-nil error only when evicting a pod on a node is not
-// possible (due to maxPodsToEvict constraint). Success is true when the pod
+// possible (due to maxPodsToEvictPerNode constraint). Success is true when the pod
 // is evicted on the server side.
 func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, reasons ...string) (bool, error) {
 	var reason string
 	if len(reasons) > 0 {
 		reason = " (" + strings.Join(reasons, ", ") + ")"
 	}
-	if pe.maxPodsToEvict > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvict {
-		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvict, node.Name)
+	if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode {
+		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name)
 	}
 
 	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
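The cap check above treats a value of 0 as "no per-node limit". A self-contained sketch of just that boundary condition; capReached is a hypothetical helper mirroring the condition in EvictPod, not part of the descheduler API:

package main

import "fmt"

// capReached reports whether evicting one more pod on a node would exceed
// the per-node cap. A cap of 0 disables the limit, as in EvictPod above.
func capReached(evictedOnNode, maxPodsToEvictPerNode int) bool {
	return maxPodsToEvictPerNode > 0 && evictedOnNode+1 > maxPodsToEvictPerNode
}

func main() {
	fmt.Println(capReached(2, 3)) // false: a third eviction is still allowed
	fmt.Println(capReached(3, 3)) // true:  the cap of 3 is exhausted
	fmt.Println(capReached(3, 0)) // false: zero disables the limit entirely
}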

View File

@@ -115,70 +115,70 @@ func TestFindDuplicatePods(t *testing.T) {
 	testCases := []struct {
 		description             string
-		maxPodsToEvict          int
+		maxPodsToEvictPerNode   int
 		pods                    []v1.Pod
 		expectedEvictedPodCount int
 		strategy                api.DeschedulerStrategy
 	}{
 		{
 			description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 2 should be evicted.",
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2, *p3},
 			expectedEvictedPodCount: 2,
 			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2, *p3},
 			expectedEvictedPodCount: 0,
 			strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
 		},
 		{
 			description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 2 should be evicted.",
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p8, *p9, *p10},
 			expectedEvictedPodCount: 2,
 			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
 			expectedEvictedPodCount: 4,
 			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
-			maxPodsToEvict: 2,
+			maxPodsToEvictPerNode: 2,
 			pods: []v1.Pod{*p4, *p5, *p6, *p7},
 			expectedEvictedPodCount: 0,
 			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Test all Pods: 4 should be evicted.",
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
 			expectedEvictedPodCount: 4,
 			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Pods with the same owner but different images should not be evicted",
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p11, *p12},
 			expectedEvictedPodCount: 0,
 			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Pods with multiple containers should not match themselves",
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p13},
 			expectedEvictedPodCount: 0,
 			strategy: api.DeschedulerStrategy{},
 		},
 		{
 			description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p11, *p13},
 			expectedEvictedPodCount: 0,
 			strategy: api.DeschedulerStrategy{},
 		},
@@ -197,7 +197,7 @@ func TestFindDuplicatePods(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			testCase.maxPodsToEvict,
+			testCase.maxPodsToEvictPerNode,
 			[]*v1.Node{node},
 			false,
 		)

View File

@@ -54,9 +54,7 @@ func TestLowNodeUtilization(t *testing.T) {
 		thresholds, targetThresholds api.ResourceThresholds
 		nodes                        map[string]*v1.Node
 		pods                         map[string]*v1.PodList
-		// TODO: divide expectedPodsEvicted into two params like other tests
-		// expectedPodsEvicted should be the result num of pods that this testCase expected but now it represents both
-		// MaxNoOfPodsToEvictPerNode and the test's expected result
+		maxPodsToEvictPerNode        int
 		expectedPodsEvicted          int
 		evictedPods                  []string
 	}{
@@ -114,6 +112,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				},
 				n3NodeName: {},
 			},
+			maxPodsToEvictPerNode: 0,
 			expectedPodsEvicted: 0,
 		},
 		{
@@ -172,7 +171,8 @@ func TestLowNodeUtilization(t *testing.T) {
 				},
 				n3NodeName: {},
 			},
-			expectedPodsEvicted: 3,
+			maxPodsToEvictPerNode: 0,
+			expectedPodsEvicted:   4,
 		},
 		{
 			name: "without priorities stop when cpu capacity is depleted",
@@ -230,6 +230,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				},
 				n3NodeName: {},
 			},
+			maxPodsToEvictPerNode: 0,
 			// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before cpu is depleted
 			expectedPodsEvicted: 3,
 		},
@@ -308,7 +309,8 @@ func TestLowNodeUtilization(t *testing.T) {
 				},
 				n3NodeName: {},
 			},
-			expectedPodsEvicted: 3,
+			maxPodsToEvictPerNode: 0,
+			expectedPodsEvicted:   4,
 		},
 		{
 			name: "without priorities evicting best-effort pods only",
@@ -383,6 +385,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				},
 				n3NodeName: {},
 			},
+			maxPodsToEvictPerNode: 0,
 			expectedPodsEvicted: 4,
 			evictedPods: []string{"p1", "p2", "p4", "p5"},
 		},
@@ -442,7 +445,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			test.expectedPodsEvicted,
+			test.maxPodsToEvictPerNode,
 			nodes,
 			false,
 		)
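This file is the substance of the PR: the TODO removed above notes that expectedPodsEvicted used to be passed to NewPodEvictor as the per-node cap, so it acted as both the limit and the assertion. With a dedicated maxPodsToEvictPerNode (0 means no limit), two fixtures that were artificially capped at 3 now evict their full 4 candidates, hence the 3 -> 4 updates. A toy simulation of that coupling, under hypothetical names (evictUpTo is illustrative only):

package main

import "fmt"

// evictUpTo simulates the evictor loop for one node; a limit of 0 means
// unlimited, matching the check in PodEvictor.EvictPod.
func evictUpTo(candidates, limit int) int {
	evicted := 0
	for i := 0; i < candidates; i++ {
		if limit > 0 && evicted+1 > limit {
			break // per-node cap reached, matching EvictPod's error path
		}
		evicted++
	}
	return evicted
}

func main() {
	// Before this PR the tests passed expectedPodsEvicted itself as the cap,
	// so a fixture with 4 eviction candidates and expectedPodsEvicted = 3
	// stopped at 3 by construction:
	fmt.Println(evictUpTo(4, 3)) // 3
	// With maxPodsToEvictPerNode set to 0 the cap is off and the same
	// fixture evicts all 4 candidates:
	fmt.Println(evictUpTo(4, 0)) // 4
}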

View File

@@ -93,7 +93,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 		pods                    []v1.Pod
 		strategy                api.DeschedulerStrategy
 		expectedEvictedPodCount int
-		maxPodsToEvict          int
+		maxPodsToEvictPerNode   int
 	}{
 		{
 			description: "Invalid strategy type, should not evict any pods",
@@ -108,7 +108,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			expectedEvictedPodCount: 0,
 			pods: addPodsToNode(nodeWithoutLabels),
 			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Pod is correctly scheduled on node, no eviction expected",
@@ -116,7 +116,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			expectedEvictedPodCount: 0,
 			pods: addPodsToNode(nodeWithLabels),
 			nodes: []*v1.Node{nodeWithLabels},
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
@@ -124,15 +124,15 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods: addPodsToNode(nodeWithoutLabels),
 			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
-			description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 1, should not be evicted",
+			description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
 			expectedEvictedPodCount: 1,
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods: addPodsToNode(nodeWithoutLabels),
 			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-			maxPodsToEvict: 1,
+			maxPodsToEvictPerNode: 1,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
@@ -140,7 +140,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods: addPodsToNode(nodeWithoutLabels),
 			nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 	}
@@ -155,7 +155,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			tc.maxPodsToEvict,
+			tc.maxPodsToEvictPerNode,
 			tc.nodes,
 			false,
 		)

View File

@@ -102,7 +102,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 		nodes                   []*v1.Node
 		pods                    []v1.Pod
 		evictLocalStoragePods   bool
-		maxPodsToEvict          int
+		maxPodsToEvictPerNode   int
 		expectedEvictedPodCount int
 	}{
@@ -111,7 +111,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			pods: []v1.Pod{*p1, *p2, *p3},
 			nodes: []*v1.Node{node1},
 			evictLocalStoragePods: false,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1, //p2 gets evicted
 		},
 		{
@@ -119,15 +119,15 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			pods: []v1.Pod{*p1, *p3, *p4},
 			nodes: []*v1.Node{node1},
 			evictLocalStoragePods: false,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1, //p4 gets evicted
 		},
 		{
-			description: "Only <maxPodsToEvict> number of Pods not tolerating node taint should be evicted",
+			description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
 			pods: []v1.Pod{*p1, *p5, *p6},
 			nodes: []*v1.Node{node1},
 			evictLocalStoragePods: false,
-			maxPodsToEvict: 1,
+			maxPodsToEvictPerNode: 1,
 			expectedEvictedPodCount: 1, //p5 or p6 gets evicted
 		},
 		{
@@ -135,7 +135,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			pods: []v1.Pod{*p7, *p8, *p9, *p10},
 			nodes: []*v1.Node{node2},
 			evictLocalStoragePods: false,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 0,
 		},
 		{
@@ -143,7 +143,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			pods: []v1.Pod{*p7, *p8, *p9, *p10},
 			nodes: []*v1.Node{node2},
 			evictLocalStoragePods: true,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1,
 		},
 		{
@@ -151,7 +151,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			pods: []v1.Pod{*p7, *p8, *p10, *p11},
 			nodes: []*v1.Node{node2},
 			evictLocalStoragePods: false,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1,
 		},
 	}
@@ -168,7 +168,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			tc.maxPodsToEvict,
+			tc.maxPodsToEvictPerNode,
 			tc.nodes,
 			tc.evictLocalStoragePods,
 		)

View File

@@ -67,25 +67,25 @@ func TestPodAntiAffinity(t *testing.T) {
 	tests := []struct {
 		description             string
-		maxPodsToEvict          int
+		maxPodsToEvictPerNode   int
 		pods                    []v1.Pod
 		expectedEvictedPodCount int
 	}{
 		{
 			description: "Maximum pods to evict - 0",
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			pods: []v1.Pod{*p1, *p2, *p3, *p4},
 			expectedEvictedPodCount: 3,
 		},
 		{
 			description: "Maximum pods to evict - 3",
-			maxPodsToEvict: 3,
+			maxPodsToEvictPerNode: 3,
 			pods: []v1.Pod{*p1, *p2, *p3, *p4},
 			expectedEvictedPodCount: 3,
 		},
 		{
 			description: "Evict only 1 pod after sorting",
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			pods: []v1.Pod{*p5, *p6, *p7},
 			expectedEvictedPodCount: 1,
 		},
@@ -105,7 +105,7 @@ func TestPodAntiAffinity(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			test.maxPodsToEvict,
+			test.maxPodsToEvictPerNode,
 			[]*v1.Node{node},
 			false,
 		)

View File

@@ -89,7 +89,7 @@ func TestPodLifeTime(t *testing.T) {
 	testCases := []struct {
 		description             string
 		strategy                api.DeschedulerStrategy
-		maxPodsToEvict          int
+		maxPodsToEvictPerNode   int
 		pods                    []v1.Pod
 		expectedEvictedPodCount int
 	}{
@@ -101,7 +101,7 @@ func TestPodLifeTime(t *testing.T) {
 					MaxPodLifeTimeSeconds: &maxLifeTime,
 				},
 			},
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2},
 			expectedEvictedPodCount: 1,
 		},
@@ -113,7 +113,7 @@ func TestPodLifeTime(t *testing.T) {
 					MaxPodLifeTimeSeconds: &maxLifeTime,
 				},
 			},
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p3, *p4},
 			expectedEvictedPodCount: 0,
 		},
@@ -125,7 +125,7 @@ func TestPodLifeTime(t *testing.T) {
 					MaxPodLifeTimeSeconds: &maxLifeTime,
 				},
 			},
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p5, *p6},
 			expectedEvictedPodCount: 1,
 		},
@@ -137,7 +137,7 @@ func TestPodLifeTime(t *testing.T) {
 					MaxPodLifeTimeSeconds: &maxLifeTime,
 				},
 			},
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p7, *p8},
 			expectedEvictedPodCount: 0,
 		},
@@ -155,7 +155,7 @@ func TestPodLifeTime(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			tc.maxPodsToEvict,
+			tc.maxPodsToEvictPerNode,
 			[]*v1.Node{node},
 			false,
 		)

View File

@@ -98,61 +98,61 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 		pods                    []v1.Pod
 		strategy                api.DeschedulerStrategy
 		expectedEvictedPodCount int
-		maxPodsToEvict          int
+		maxPodsToEvictPerNode   int
 	}{
 		{
 			description: "All pods have total restarts under threshold, no pod evictions",
 			strategy: createStrategy(true, true, 10000),
 			expectedEvictedPodCount: 0,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Some pods have total restarts bigger than threshold",
 			strategy: createStrategy(true, true, 1),
 			expectedEvictedPodCount: 6,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pods evictions",
 			strategy: createStrategy(true, true, 1*25),
 			expectedEvictedPodCount: 6,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pods evictions",
 			strategy: createStrategy(true, false, 1*25),
 			expectedEvictedPodCount: 5,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pods evictions",
 			strategy: createStrategy(true, true, 1*20),
 			expectedEvictedPodCount: 6,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pods evictions",
 			strategy: createStrategy(true, false, 1*20),
 			expectedEvictedPodCount: 6,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
 			strategy: createStrategy(true, true, 5*25+1),
 			expectedEvictedPodCount: 1,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
 			strategy: createStrategy(true, false, 5*20+1),
 			expectedEvictedPodCount: 1,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
-			description: "All pods have total restarts equals threshold(maxPodsToEvict=3), 3 pods evictions",
+			description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pods evictions",
 			strategy: createStrategy(true, true, 1),
 			expectedEvictedPodCount: 3,
-			maxPodsToEvict: 3,
+			maxPodsToEvictPerNode: 3,
 		},
 	}
@@ -169,7 +169,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			tc.maxPodsToEvict,
+			tc.maxPodsToEvictPerNode,
 			[]*v1.Node{node},
 			false,
 		)