diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go
index 502f4338f..2062319ae 100644
--- a/pkg/descheduler/descheduler.go
+++ b/pkg/descheduler/descheduler.go
@@ -70,16 +70,17 @@ type profileRunner struct {
 }
 
 type descheduler struct {
-	rs                    *options.DeschedulerServer
-	podLister             listersv1.PodLister
-	nodeLister            listersv1.NodeLister
-	namespaceLister       listersv1.NamespaceLister
-	priorityClassLister   schedulingv1.PriorityClassLister
-	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
-	sharedInformerFactory informers.SharedInformerFactory
-	deschedulerPolicy     *api.DeschedulerPolicy
-	eventRecorder         events.EventRecorder
-	podEvictor            *evictions.PodEvictor
+	rs                      *options.DeschedulerServer
+	podLister               listersv1.PodLister
+	nodeLister              listersv1.NodeLister
+	namespaceLister         listersv1.NamespaceLister
+	priorityClassLister     schedulingv1.PriorityClassLister
+	getPodsAssignedToNode   podutil.GetPodsAssignedToNodeFunc
+	sharedInformerFactory   informers.SharedInformerFactory
+	deschedulerPolicy       *api.DeschedulerPolicy
+	eventRecorder           events.EventRecorder
+	podEvictor              *evictions.PodEvictor
+	podEvictionReactionFnc  func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
 }
 
 func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
@@ -105,16 +106,17 @@ func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.Desche
 	)
 
 	return &descheduler{
-		rs:                    rs,
-		podLister:             podLister,
-		nodeLister:            nodeLister,
-		namespaceLister:       namespaceLister,
-		priorityClassLister:   priorityClassLister,
-		getPodsAssignedToNode: getPodsAssignedToNode,
-		sharedInformerFactory: sharedInformerFactory,
-		deschedulerPolicy:     deschedulerPolicy,
-		eventRecorder:         eventRecorder,
-		podEvictor:            podEvictor,
+		rs:                     rs,
+		podLister:              podLister,
+		nodeLister:             nodeLister,
+		namespaceLister:        namespaceLister,
+		priorityClassLister:    priorityClassLister,
+		getPodsAssignedToNode:  getPodsAssignedToNode,
+		sharedInformerFactory:  sharedInformerFactory,
+		deschedulerPolicy:      deschedulerPolicy,
+		eventRecorder:          eventRecorder,
+		podEvictor:             podEvictor,
+		podEvictionReactionFnc: podEvictionReactionFnc,
 	}, nil
 }
 
@@ -141,7 +143,7 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
 	// Create a new cache so we start from scratch without any leftovers
 	fakeClient := fakeclientset.NewSimpleClientset()
 	// simulate a pod eviction by deleting a pod
-	fakeClient.PrependReactor("create", "pods", podEvictionReactionFnc(fakeClient))
+	fakeClient.PrependReactor("create", "pods", d.podEvictionReactionFnc(fakeClient))
 	err := cachedClient(d.rs.Client, fakeClient, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
 	if err != nil {
 		return err
diff --git a/pkg/descheduler/descheduler_test.go b/pkg/descheduler/descheduler_test.go
index 6e5ce01b0..a72621be9 100644
--- a/pkg/descheduler/descheduler_test.go
+++ b/pkg/descheduler/descheduler_test.go
@@ -390,6 +390,11 @@ func TestPodEvictorReset(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Unable to create a descheduler instance: %v", err)
 	}
+	var fakeEvictedPods []string
+	descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
+		return podEvictionReactionTestingFnc(&fakeEvictedPods)
+	}
+
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
@@ -406,8 +411,8 @@ func TestPodEvictorReset(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Unable to run a descheduling loop: %v", err)
 	}
-	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 {
-		t.Fatalf("Expected (2,2) pods evicted, got (%v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods))
+	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 || len(fakeEvictedPods) != 0 {
+		t.Fatalf("Expected (2,2,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
 	}
 
 	// a single pod eviction expected
@@ -415,7 +420,27 @@ if err != nil {
 	if err != nil {
 		t.Fatalf("Unable to run a descheduling loop: %v", err)
 	}
-	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 {
-		t.Fatalf("Expected (2,4) pods evicted, got (%v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods))
+	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 || len(fakeEvictedPods) != 0 {
+		t.Fatalf("Expected (2,4,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
+	}
+
+	// check the fake client syncing and the right pods evicted
+	rs.DryRun = true
+	evictedPods = []string{}
+	// a single pod eviction expected
+	err = descheduler.runDeschedulerLoop(ctx, nodes)
+	if err != nil {
+		t.Fatalf("Unable to run a descheduling loop: %v", err)
+	}
+	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 2 {
+		t.Fatalf("Expected (2,0,2) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
+	}
+	// a single pod eviction expected
+	err = descheduler.runDeschedulerLoop(ctx, nodes)
+	if err != nil {
+		t.Fatalf("Unable to run a descheduling loop: %v", err)
+	}
+	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 4 {
+		t.Fatalf("Expected (2,0,4) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
 	}
 }