
Merge pull request #658 from JaneLiuL/master

Add maxNoOfPodsToEvictPerNamespace policy
Kubernetes Prow Robot committed via GitHub on 2021-12-03 01:50:27 -08:00
20 changed files with 148 additions and 44 deletions

View File

@@ -43,6 +43,9 @@ type DeschedulerPolicy struct {
// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *uint
// MaxNoOfPodsToEvictPerNamespace restricts the maximum number of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint
}
type StrategyName string

View File

@@ -43,6 +43,9 @@ type DeschedulerPolicy struct {
// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts the maximum number of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *int `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}
type StrategyName string
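
For reference, the new field sits at the top level of the policy file next to maxNoOfPodsToEvictPerNode, matching the json tags above. A minimal policy sketch using it (the strategy shown is only an illustration and is not part of this PR):

apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
maxNoOfPodsToEvictPerNode: 10
maxNoOfPodsToEvictPerNamespace: 5
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true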

View File

@@ -143,6 +143,13 @@ func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
return nil
}
@@ -164,6 +171,13 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.Des
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
return nil
}

View File

@@ -62,6 +62,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(int)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = **in
}
return
}

View File

@@ -62,6 +62,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = **in
}
return
}

View File

@@ -128,6 +128,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
evictionPolicyGroupVersion,
rs.DryRun,
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
evictLocalStoragePods,
evictSystemCriticalPods,
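
Both limit parameters are optional *uint values, with nil meaning "no limit", so a caller that wants to set them needs addressable variables. A rough sketch of such a call (identifiers other than the new parameter are assumed from the surrounding RunDeschedulerStrategies and not verified against this diff):

maxPerNode := uint(10)
maxPerNamespace := uint(5)
podEvictor := evictions.NewPodEvictor(
    rs.Client,
    evictionPolicyGroupVersion,
    rs.DryRun,
    &maxPerNode,
    &maxPerNamespace,
    nodes,
    evictLocalStoragePods,
    evictSystemCriticalPods,
    ignorePvcPods,
)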

View File

@@ -46,6 +46,7 @@ const (
// nodePodEvictedCount keeps count of pods evicted on node
type nodePodEvictedCount map[*v1.Node]uint
type namespacePodEvictCount map[string]uint
type PodEvictor struct {
client clientset.Interface
@@ -53,7 +54,9 @@ type PodEvictor struct {
policyGroupVersion string
dryRun bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
nodepodCount nodePodEvictedCount
namespacePodCount namespacePodEvictCount
evictLocalStoragePods bool
evictSystemCriticalPods bool
ignorePvcPods bool
@@ -64,12 +67,14 @@ func NewPodEvictor(
policyGroupVersion string,
dryRun bool,
maxPodsToEvictPerNode *uint,
maxPodsToEvictPerNamespace *uint,
nodes []*v1.Node,
evictLocalStoragePods bool,
evictSystemCriticalPods bool,
ignorePvcPods bool,
) *PodEvictor {
var nodePodCount = make(nodePodEvictedCount)
var namespacePodCount = make(namespacePodEvictCount)
for _, node := range nodes {
// Initialize podsEvicted till now with 0.
nodePodCount[node] = 0
@@ -81,7 +86,9 @@ func NewPodEvictor(
policyGroupVersion: policyGroupVersion,
dryRun: dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
nodepodCount: nodePodCount,
namespacePodCount: namespacePodCount,
evictLocalStoragePods: evictLocalStoragePods,
evictSystemCriticalPods: evictSystemCriticalPods,
ignorePvcPods: ignorePvcPods,
@@ -111,10 +118,15 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
reason += " (" + strings.Join(reasons, ", ") + ")"
}
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[node]+1 > *pe.maxPodsToEvictPerNode {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number reached", "strategy": strategy, "namespace": pod.Namespace}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace}).Inc()
return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", *pe.maxPodsToEvictPerNode, node.Name)
}
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace}).Inc()
return false, fmt.Errorf("Maximum number %v of evicted pods per %q namespace reached", *pe.maxPodsToEvictPerNamespace, pod.Namespace)
}
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
if err != nil {
// err is used only for logging purposes
@@ -124,6 +136,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
}
pe.nodepodCount[node]++
pe.namespacePodCount[pod.Namespace]++
if pe.dryRun {
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason)
} else {
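
Both guards in EvictPod share the same shape: evicting one more pod must not push the running count past an optional limit, and the counters are only incremented after evictPod succeeds. A standalone sketch of that check (the helper name is illustrative, not part of the PR):

// wouldExceed reports whether evicting one more pod would cross an optional limit.
// A nil limit disables the check.
func wouldExceed(evictedSoFar uint, limit *uint) bool {
    return limit != nil && evictedSoFar+1 > *limit
}

The node check runs first against pe.nodepodCount[node], then the namespace check against pe.namespacePodCount[pod.Namespace], so whichever limit is reached first determines the returned error.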

View File

@@ -275,6 +275,7 @@ func TestFindDuplicatePods(t *testing.T) {
"v1",
false,
nil,
nil,
testCase.nodes,
false,
false,
@@ -682,6 +683,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
testCase.nodes,
false,
false,

View File

@@ -222,6 +222,7 @@ func TestRemoveFailedPods(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
tc.nodes,
false,
false,

View File

@@ -110,6 +110,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy api.DeschedulerStrategy
expectedEvictedPodCount uint
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
}{
{
description: "Invalid strategy type, should not evict any pods",
@@ -155,6 +156,22 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should not be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
expectedEvictedPodCount: 0,
@@ -184,6 +201,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
false,

View File

@@ -125,6 +125,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
evictLocalStoragePods bool
evictSystemCriticalPods bool
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
expectedEvictedPodCount uint
nodeFit bool
}{
@@ -154,6 +155,15 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
maxPodsToEvictPerNode: &uint1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p5, *p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxNoOfPodsToEvictPerNamespace: &uint1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{
description: "Critical pods not tolerating node taint should not be evicted",
pods: []v1.Pod{*p7, *p8, *p9, *p10},
@@ -228,6 +238,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
tc.evictLocalStoragePods,
tc.evictSystemCriticalPods,

View File

@@ -566,6 +566,7 @@ func TestHighNodeUtilization(t *testing.T) {
"v1",
false,
nil,
nil,
nodes,
false,
false,
@@ -756,6 +757,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
"policy/v1",
false,
&item.evictionsExpected,
nil,
item.nodes,
false,
false,

View File

@@ -778,6 +778,7 @@ func TestLowNodeUtilization(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
nodes,
false,
false,
@@ -1078,6 +1079,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
&item.evictionsExpected,
nil,
item.nodes,
false,
false,

View File

@@ -105,6 +105,7 @@ func TestPodAntiAffinity(t *testing.T) {
tests := []struct {
description string
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
pods []v1.Pod
expectedEvictedPodCount uint
nodeFit bool
@@ -123,6 +124,13 @@ func TestPodAntiAffinity(t *testing.T) {
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict (maxPodsToEvictPerNamespace=3) - 3",
maxNoOfPodsToEvictPerNamespace: &uint3,
pods: []v1.Pod{*p1, *p2, *p3, *p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Evict only 1 pod after sorting",
pods: []v1.Pod{*p5, *p6, *p7},
@@ -182,6 +190,7 @@ func TestPodAntiAffinity(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
test.maxPodsToEvictPerNode,
test.maxNoOfPodsToEvictPerNamespace,
test.nodes,
false,
false,

View File

@@ -276,6 +276,7 @@ func TestPodLifeTime(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
tc.nodes,
false,
false,

View File

@@ -122,6 +122,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
strategy api.DeschedulerStrategy
expectedEvictedPodCount uint
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
}{
{
description: "All pods have total restarts under threshold, no pod evictions",
@@ -178,6 +179,13 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
strategy: createStrategy(true, true, 1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
maxNoOfPodsToEvictPerNamespace: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tained, 0 pod evictions",
strategy: createStrategy(true, true, 1, true),
@@ -206,6 +214,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
false,

View File

@@ -887,6 +887,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
"v1",
false,
nil,
nil,
tc.nodes,
false,
false,

View File

@@ -145,6 +145,7 @@ func TestRemoveDuplicates(t *testing.T) {
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
true,
false,

View File

@@ -167,6 +167,7 @@ func runPodLifetimeStrategy(
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
false,
evictCritical,
@@ -1291,6 +1292,7 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, nodes []*
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
true,
false,

View File

@@ -135,6 +135,7 @@ func TestTooManyRestarts(t *testing.T) {
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
true,
false,