mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

feat: pod evictor options

Amir Alavi
2024-07-05 21:15:55 -04:00
parent 686417b6de
commit e26f6429a2
18 changed files with 109 additions and 158 deletions
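
In short: NewPodEvictor now takes the client, an event recorder, and an optional *evictions.Options built with chainable With* setters, instead of six positional arguments. A rough before/after sketch of a typical call site (the names client, dryRun, maxPerNode, maxPerNamespace and metricsEnabled are placeholders, not identifiers taken from the diff):

	// Before: positional arguments.
	podEvictor := evictions.NewPodEvictor(client, "policy/v1", dryRun, maxPerNode, maxPerNamespace, metricsEnabled, eventRecorder)

	// After: pass an *evictions.Options; a nil Options falls back to NewOptions() defaults
	// (policy group version "policy/v1", no per-node or per-namespace limits, dry run and metrics off).
	podEvictor := evictions.NewPodEvictor(client, eventRecorder,
		evictions.NewOptions().
			WithDryRun(dryRun).
			WithMaxPodsToEvictPerNode(maxPerNode).
			WithMaxPodsToEvictPerNamespace(maxPerNamespace).
			WithMetricsEnabled(metricsEnabled))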

View File

@@ -97,12 +97,13 @@ func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.Desche
 	podEvictor := evictions.NewPodEvictor(
 		nil,
-		evictionPolicyGroupVersion,
-		rs.DryRun,
-		deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
-		deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
-		!rs.DisableMetrics,
 		eventRecorder,
+		evictions.NewOptions().
+			WithPolicyGroupVersion(evictionPolicyGroupVersion).
+			WithMaxPodsToEvictPerNode(deschedulerPolicy.MaxNoOfPodsToEvictPerNode).
+			WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
+			WithDryRun(rs.DryRun).
+			WithMetricsEnabled(!rs.DisableMetrics),
 	)
 	return &descheduler{

View File

@@ -55,23 +55,23 @@ type PodEvictor struct {
 func NewPodEvictor(
 	client clientset.Interface,
-	policyGroupVersion string,
-	dryRun bool,
-	maxPodsToEvictPerNode *uint,
-	maxPodsToEvictPerNamespace *uint,
-	metricsEnabled bool,
 	eventRecorder events.EventRecorder,
+	options *Options,
 ) *PodEvictor {
+	if options == nil {
+		options = NewOptions()
+	}
 	return &PodEvictor{
 		client: client,
-		policyGroupVersion: policyGroupVersion,
-		dryRun: dryRun,
-		maxPodsToEvictPerNode: maxPodsToEvictPerNode,
-		maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
-		eventRecorder: eventRecorder,
+		policyGroupVersion: options.policyGroupVersion,
+		dryRun: options.dryRun,
+		maxPodsToEvictPerNode: options.maxPodsToEvictPerNode,
+		maxPodsToEvictPerNamespace: options.maxPodsToEvictPerNamespace,
+		metricsEnabled: options.metricsEnabled,
 		nodepodCount: make(nodePodEvictedCount),
 		namespacePodCount: make(namespacePodEvictCount),
-		metricsEnabled: metricsEnabled,
+		eventRecorder: eventRecorder,
 	}
 }

View File

@@ -126,12 +126,8 @@ func TestNewPodEvictor(t *testing.T) {
 	podEvictor := NewPodEvictor(
 		fakeClient,
-		"policy/v1",
-		false,
-		utilpointer.Uint(1),
-		nil,
-		false,
 		eventRecorder,
+		NewOptions().WithMaxPodsToEvictPerNode(utilpointer.Uint(1)),
 	)
 	stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}

View File

@@ -0,0 +1,45 @@
+package evictions
+
+import (
+	policy "k8s.io/api/policy/v1"
+)
+
+type Options struct {
+	policyGroupVersion string
+	dryRun bool
+	maxPodsToEvictPerNode *uint
+	maxPodsToEvictPerNamespace *uint
+	metricsEnabled bool
+}
+
+// NewOptions returns an Options with default values.
+func NewOptions() *Options {
+	return &Options{
+		policyGroupVersion: policy.SchemeGroupVersion.String(),
+	}
+}
+
+func (o *Options) WithPolicyGroupVersion(policyGroupVersion string) *Options {
+	o.policyGroupVersion = policyGroupVersion
+	return o
+}
+
+func (o *Options) WithDryRun(dryRun bool) *Options {
+	o.dryRun = dryRun
+	return o
+}
+
+func (o *Options) WithMaxPodsToEvictPerNode(maxPodsToEvictPerNode *uint) *Options {
+	o.maxPodsToEvictPerNode = maxPodsToEvictPerNode
+	return o
+}
+
+func (o *Options) WithMaxPodsToEvictPerNamespace(maxPodsToEvictPerNamespace *uint) *Options {
+	o.maxPodsToEvictPerNamespace = maxPodsToEvictPerNamespace
+	return o
+}
+
+func (o *Options) WithMetricsEnabled(metricsEnabled bool) *Options {
+	o.metricsEnabled = metricsEnabled
+	return o
+}
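
Note on the new Options type: each With* setter mutates the receiver and returns it, so the setters can be chained, and NewPodEvictor (see the hunk above) treats a nil *Options as NewOptions(), which is why call sites below that only need the defaults simply pass nil.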

View File

@@ -486,15 +486,7 @@ func TestHighNodeUtilization(t *testing.T) {
 			eventRecorder := &events.FakeRecorder{}
-			podEvictor := evictions.NewPodEvictor(
-				fakeClient,
-				"v1",
-				false,
-				nil,
-				nil,
-				false,
-				eventRecorder,
-			)
+			podEvictor := evictions.NewPodEvictor(fakeClient, eventRecorder, nil)
 			defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
 				EvictLocalStoragePods: false,
@@ -639,12 +631,8 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 			podEvictor := evictions.NewPodEvictor(
 				fakeClient,
-				"policy/v1",
-				false,
-				&item.evictionsExpected,
-				nil,
-				false,
 				eventRecorder,
+				evictions.NewOptions().WithMaxPodsToEvictPerNode(&item.evictionsExpected),
 			)
 			defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{

View File

@@ -430,7 +430,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
 				test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
-			// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
+			// All pods are assumed to be burstable (tc.BuildTestNode always sets both cpu/memory resource requests to some value)
 			pods: []*v1.Pod{
 				test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
 					test.SetRSOwnerRef(pod)
@@ -855,16 +855,16 @@ func TestLowNodeUtilization(t *testing.T) {
 		},
 	}
-	for _, test := range testCases {
-		t.Run(test.name, func(t *testing.T) {
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 			var objs []runtime.Object
-			for _, node := range test.nodes {
+			for _, node := range tc.nodes {
 				objs = append(objs, node)
 			}
-			for _, pod := range test.pods {
+			for _, pod := range tc.pods {
 				objs = append(objs, pod)
 			}
 			fakeClient := fake.NewSimpleClientset(objs...)
@@ -878,12 +878,12 @@ func TestLowNodeUtilization(t *testing.T) {
 			}
 			podsForEviction := make(map[string]struct{})
-			for _, pod := range test.evictedPods {
+			for _, pod := range tc.evictedPods {
 				podsForEviction[pod] = struct{}{}
 			}
 			evictionFailed := false
-			if len(test.evictedPods) > 0 {
+			if len(tc.evictedPods) > 0 {
 				fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
 					getAction := action.(core.CreateAction)
 					obj := getAction.GetObject()
@@ -903,15 +903,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			eventRecorder := &events.FakeRecorder{}
-			podEvictor := evictions.NewPodEvictor(
-				fakeClient,
-				policy.SchemeGroupVersion.String(),
-				false,
-				nil,
-				nil,
-				false,
-				eventRecorder,
-			)
+			podEvictor := evictions.NewPodEvictor(fakeClient, eventRecorder, nil)
 			defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
 				EvictLocalStoragePods: false,
@@ -942,20 +934,20 @@ func TestLowNodeUtilization(t *testing.T) {
 			}
 			plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
-				Thresholds: test.thresholds,
-				TargetThresholds: test.targetThresholds,
-				UseDeviationThresholds: test.useDeviationThresholds,
-				EvictableNamespaces: test.evictableNamespaces,
+				Thresholds: tc.thresholds,
+				TargetThresholds: tc.targetThresholds,
+				UseDeviationThresholds: tc.useDeviationThresholds,
+				EvictableNamespaces: tc.evictableNamespaces,
 			},
 				handle)
 			if err != nil {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}
-			plugin.(frameworktypes.BalancePlugin).Balance(ctx, test.nodes)
+			plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)
 			podsEvicted := podEvictor.TotalEvicted()
-			if test.expectedPodsEvicted != podsEvicted {
-				t.Errorf("Expected %v pods to be evicted but %v got evicted", test.expectedPodsEvicted, podsEvicted)
+			if tc.expectedPodsEvicted != podsEvicted {
+				t.Errorf("Expected %v pods to be evicted but %v got evicted", tc.expectedPodsEvicted, podsEvicted)
 			}
 			if evictionFailed {
 				t.Errorf("Pod evictions failed unexpectedly")
@@ -1076,12 +1068,8 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 			podEvictor := evictions.NewPodEvictor(
 				fakeClient,
-				policy.SchemeGroupVersion.String(),
-				false,
-				&item.evictionsExpected,
-				nil,
-				false,
 				eventRecorder,
+				evictions.NewOptions().WithMaxPodsToEvictPerNode(&item.evictionsExpected),
 			)
 			defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{

View File

@@ -22,7 +22,6 @@ import (
"time"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -558,12 +557,10 @@ func TestPodLifeTime(t *testing.T) {
 			podEvictor := evictions.NewPodEvictor(
 				fakeClient,
-				policyv1.SchemeGroupVersion.String(),
-				false,
-				tc.maxPodsToEvictPerNode,
-				tc.maxPodsToEvictPerNamespace,
-				false,
 				eventRecorder,
+				evictions.NewOptions().
+					WithMaxPodsToEvictPerNode(tc.maxPodsToEvictPerNode).
+					WithMaxPodsToEvictPerNamespace(tc.maxPodsToEvictPerNamespace),
 			)
 			defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{

View File

@@ -26,7 +26,6 @@ import (
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 
 	v1 "k8s.io/api/core/v1"
-	policyv1 "k8s.io/api/policy/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -313,15 +312,7 @@ func TestFindDuplicatePods(t *testing.T) {
 			eventRecorder := &events.FakeRecorder{}
-			podEvictor := evictions.NewPodEvictor(
-				fakeClient,
-				"v1",
-				false,
-				nil,
-				nil,
-				false,
-				eventRecorder,
-			)
+			podEvictor := evictions.NewPodEvictor(fakeClient, eventRecorder, nil)
 			nodeFit := testCase.nodefit
@@ -761,15 +752,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 			eventRecorder := &events.FakeRecorder{}
-			podEvictor := evictions.NewPodEvictor(
-				fakeClient,
-				policyv1.SchemeGroupVersion.String(),
-				false,
-				nil,
-				nil,
-				false,
-				eventRecorder,
-			)
+			podEvictor := evictions.NewPodEvictor(fakeClient, eventRecorder, nil)
 			defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
 				EvictLocalStoragePods: false,

View File

@@ -21,7 +21,6 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -375,15 +374,7 @@ func TestRemoveFailedPods(t *testing.T) {
 			eventRecorder := &events.FakeRecorder{}
-			podEvictor := evictions.NewPodEvictor(
-				fakeClient,
-				policyv1.SchemeGroupVersion.String(),
-				false,
-				nil,
-				nil,
-				false,
-				eventRecorder,
-			)
+			podEvictor := evictions.NewPodEvictor(fakeClient, eventRecorder, nil)
 			defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
 				EvictLocalStoragePods: false,

View File

@@ -22,7 +22,6 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -346,12 +345,10 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 			podEvictor := evictions.NewPodEvictor(
 				fakeClient,
-				policyv1.SchemeGroupVersion.String(),
-				false,
-				tc.maxPodsToEvictPerNode,
-				tc.maxNoOfPodsToEvictPerNamespace,
-				false,
 				eventRecorder,
+				evictions.NewOptions().
+					WithMaxPodsToEvictPerNode(tc.maxPodsToEvictPerNode).
+					WithMaxPodsToEvictPerNamespace(tc.maxNoOfPodsToEvictPerNamespace),
 			)
 			defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{

View File

@@ -21,7 +21,6 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -235,12 +234,10 @@ func TestPodAntiAffinity(t *testing.T) {
 			podEvictor := evictions.NewPodEvictor(
 				fakeClient,
-				policyv1.SchemeGroupVersion.String(),
-				false,
-				test.maxPodsToEvictPerNode,
-				test.maxNoOfPodsToEvictPerNamespace,
-				false,
 				eventRecorder,
+				evictions.NewOptions().
+					WithMaxPodsToEvictPerNode(test.maxPodsToEvictPerNode).
+					WithMaxPodsToEvictPerNamespace(test.maxNoOfPodsToEvictPerNamespace),
 			)
 			defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{

View File

@@ -21,7 +21,6 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -361,12 +360,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			podEvictor := evictions.NewPodEvictor(
 				fakeClient,
-				policyv1.SchemeGroupVersion.String(),
-				false,
-				tc.maxPodsToEvictPerNode,
-				tc.maxNoOfPodsToEvictPerNamespace,
-				false,
 				eventRecorder,
+				evictions.NewOptions().
+					WithMaxPodsToEvictPerNode(tc.maxPodsToEvictPerNode).
+					WithMaxPodsToEvictPerNamespace(tc.maxNoOfPodsToEvictPerNamespace),
 			)
 			defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{

View File

@@ -22,7 +22,6 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -406,12 +405,10 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			podEvictor := evictions.NewPodEvictor(
 				fakeClient,
-				policyv1.SchemeGroupVersion.String(),
-				false,
-				tc.maxPodsToEvictPerNode,
-				tc.maxNoOfPodsToEvictPerNamespace,
-				false,
 				eventRecorder,
+				evictions.NewOptions().
+					WithMaxPodsToEvictPerNode(tc.maxPodsToEvictPerNode).
+					WithMaxPodsToEvictPerNamespace(tc.maxNoOfPodsToEvictPerNamespace),
 			)
 			defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{

View File

@@ -1456,15 +1456,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			eventRecorder := &events.FakeRecorder{}
-			podEvictor := evictions.NewPodEvictor(
-				fakeClient,
-				"v1",
-				false,
-				nil,
-				nil,
-				false,
-				eventRecorder,
-			)
+			podEvictor := evictions.NewPodEvictor(fakeClient, eventRecorder, nil)
 			defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
 				EvictLocalStoragePods: false,

View File

@@ -244,7 +244,7 @@ func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
 			eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
 			defer eventBroadcaster.Shutdown()
-			podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, true, eventRecorder)
+			podEvictor := evictions.NewPodEvictor(client, eventRecorder, nil)
 			prfl, err := NewProfile(
 				test.config,
@@ -392,7 +392,7 @@ func TestProfileExtensionPoints(t *testing.T) {
 			eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
 			defer eventBroadcaster.Shutdown()
-			podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, true, eventRecorder)
+			podEvictor := evictions.NewPodEvictor(client, eventRecorder, nil)
 			prfl, err := NewProfile(
 				api.DeschedulerProfile{
@@ -604,7 +604,7 @@ func TestProfileExtensionPointOrdering(t *testing.T) {
 			eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
 			defer eventBroadcaster.Shutdown()
-			podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, true, eventRecorder)
+			podEvictor := evictions.NewPodEvictor(client, eventRecorder, nil)
 			prfl, err := NewProfile(
 				api.DeschedulerProfile{

View File

@@ -171,15 +171,7 @@ func TestRemoveDuplicates(t *testing.T) {
 	eventRecorder := &events.FakeRecorder{}
-	podEvictor := evictions.NewPodEvictor(
-		clientSet,
-		evictionPolicyGroupVersion,
-		false,
-		nil,
-		nil,
-		false,
-		eventRecorder,
-	)
+	podEvictor := evictions.NewPodEvictor(clientSet, eventRecorder, nil)
 	defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
 		EvictLocalStoragePods: true,

View File

@@ -192,12 +192,10 @@ func runPodLifetimePlugin(
 	podEvictor := evictions.NewPodEvictor(
 		clientset,
-		evictionPolicyGroupVersion,
-		false,
-		nil,
-		maxPodsToEvictPerNamespace,
-		false,
 		&events.FakeRecorder{},
+		evictions.NewOptions().
+			WithPolicyGroupVersion(evictionPolicyGroupVersion).
+			WithMaxPodsToEvictPerNamespace(maxPodsToEvictPerNamespace),
 	)
 	var thresholdPriority int32
@@ -1579,11 +1577,7 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAs
 	return evictions.NewPodEvictor(
 		clientSet,
-		evictionPolicyGroupVersion,
-		false,
-		nil,
-		nil,
-		false,
 		eventRecorder,
+		evictions.NewOptions().WithPolicyGroupVersion(evictionPolicyGroupVersion),
 	)
 }

View File

@@ -163,12 +163,8 @@ func TestTooManyRestarts(t *testing.T) {
 	podEvictor := evictions.NewPodEvictor(
 		clientSet,
-		evictionPolicyGroupVersion,
-		false,
-		nil,
-		nil,
-		false,
 		eventRecorder,
+		evictions.NewOptions().WithPolicyGroupVersion(evictionPolicyGroupVersion),
 	)
 	defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{