mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Compare commits


2 Commits

Author                 SHA1        Message                                                         Date
Kubernetes Prow Robot  85b1d97dda  Merge pull request #1810 from ingvagabund/refactorings         2026-01-20 19:08:49 +05:30
                                   chore(pkg/descheduler): make TestPodEvictorReset table driven
Jan Chaloupka          b6aadc1643  chore(pkg/descheduler): make TestPodEvictorReset table driven  2026-01-20 12:51:58 +01:00
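
The change applies Go's standard table-driven test pattern: duplicated per-mode setup and assertions are folded into a slice of test cases iterated with t.Run, so each mode runs as a named subtest and a new scenario becomes a new table row. A minimal sketch of the pattern, with illustrative names that are not taken from the descheduler code:

package demo

import "testing"

// abs is a trivial function under test, present only to give the
// table-driven skeleton something to exercise.
func abs(x int) int {
	if x < 0 {
		return -x
	}
	return x
}

func TestAbs(t *testing.T) {
	// Each table entry is one named scenario; adding a case means
	// adding a row, not copy-pasting the assertion block.
	tests := []struct {
		name string
		in   int
		want int
	}{
		{name: "positive", in: 5, want: 5},
		{name: "negative", in: -5, want: 5},
		{name: "zero", in: 0, want: 0},
	}
	for _, tc := range tests {
		// t.Run gives each row its own subtest, so failures report
		// which case broke and a single case can be selected with
		// -run 'TestAbs/negative'.
		t.Run(tc.name, func(t *testing.T) {
			if got := abs(tc.in); got != tc.want {
				t.Fatalf("abs(%d) = %d, want %d", tc.in, got, tc.want)
			}
		})
	}
}

This is exactly the shape the PR gives TestPodEvictorReset below: the real-mode and dry-mode flows become two rows of one table.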


@@ -177,7 +177,7 @@ func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThreshold
 	}
 }
 
-func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
+func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, dryRun bool, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
 	client := fakeclientset.NewSimpleClientset(objects...)
 	eventClient := fakeclientset.NewSimpleClientset(objects...)
@@ -189,6 +189,7 @@ func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate
 	rs.EventClient = eventClient
 	rs.DefaultFeatureGates = featureGates
 	rs.MetricsClient = metricsClient
+	rs.DryRun = dryRun
 	sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
 	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
@@ -477,22 +478,53 @@ func taintNodeNoSchedule(node *v1.Node) {
 func TestPodEvictorReset(t *testing.T) {
 	initPluginRegistry()
+	tests := []struct {
+		name   string
+		dryRun bool
+		cycles []struct {
+			expectedTotalEvicted  uint
+			expectedRealEvictions int
+			expectedFakeEvictions int
+		}
+	}{
+		{
+			name:   "real mode",
+			dryRun: false,
+			cycles: []struct {
+				expectedTotalEvicted  uint
+				expectedRealEvictions int
+				expectedFakeEvictions int
+			}{
+				{expectedTotalEvicted: 2, expectedRealEvictions: 2, expectedFakeEvictions: 0},
+				{expectedTotalEvicted: 2, expectedRealEvictions: 4, expectedFakeEvictions: 0},
+			},
+		},
+		{
+			name:   "dry mode",
+			dryRun: true,
+			cycles: []struct {
+				expectedTotalEvicted  uint
+				expectedRealEvictions int
+				expectedFakeEvictions int
+			}{
+				{expectedTotalEvicted: 2, expectedRealEvictions: 0, expectedFakeEvictions: 2},
+				{expectedTotalEvicted: 2, expectedRealEvictions: 0, expectedFakeEvictions: 4},
+			},
+		},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
 			ctx := context.Background()
 			node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
 			node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
-			ownerRef1 := test.GetReplicaSetOwnerRefList()
-			updatePod := func(pod *v1.Pod) {
-				pod.Namespace = "dev"
-				pod.ObjectMeta.OwnerReferences = ownerRef1
-			}
-			p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePod)
-			p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePod)
+			p1 := test.BuildTestPod("p1", 100, 0, node1.Name, test.SetRSOwnerRef)
+			p2 := test.BuildTestPod("p2", 100, 0, node1.Name, test.SetRSOwnerRef)
 			internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
 			ctxCancel, cancel := context.WithCancel(ctx)
-			rs, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, node1, node2, p1, p2)
+			_, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, tc.dryRun, node1, node2, p1, p2)
 			defer cancel()
 			var evictedPods []string
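
One readability quirk of the new table: because cycles is declared with an anonymous struct type, Go requires the full field list to be restated in every composite literal, which is why the three expected* fields appear three times above. A named type would remove the repetition; the type below is a hypothetical alternative, not something this PR introduces:

package demo

// cycleExpectation is a hypothetical named type for the per-cycle
// assertions; with it, each table entry could declare
// cycles: []cycleExpectation{...} without restating the field list.
type cycleExpectation struct {
	expectedTotalEvicted  uint
	expectedRealEvictions int
	expectedFakeEvictions int
}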
@@ -503,43 +535,15 @@ func TestPodEvictorReset(t *testing.T) {
 				return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
 			}
 
-	// a single pod eviction expected
-	klog.Infof("2 pod eviction expected per a descheduling cycle, 2 real evictions in total")
-	if err := descheduler.runDeschedulerLoop(ctx); err != nil {
-		t.Fatalf("Unable to run a descheduling loop: %v", err)
-	}
-	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 || len(fakeEvictedPods) != 0 {
-		t.Fatalf("Expected (2,2,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
-	}
-
-	// a single pod eviction expected
-	klog.Infof("2 pod eviction expected per a descheduling cycle, 4 real evictions in total")
-	if err := descheduler.runDeschedulerLoop(ctx); err != nil {
-		t.Fatalf("Unable to run a descheduling loop: %v", err)
-	}
-	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 || len(fakeEvictedPods) != 0 {
-		t.Fatalf("Expected (2,4,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
-	}
-
-	// check the fake client syncing and the right pods evicted
-	klog.Infof("Enabling the dry run mode")
-	rs.DryRun = true
-	evictedPods = []string{}
-
-	klog.Infof("2 pod eviction expected per a descheduling cycle, 2 fake evictions in total")
-	if err := descheduler.runDeschedulerLoop(ctx); err != nil {
-		t.Fatalf("Unable to run a descheduling loop: %v", err)
-	}
-	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 2 {
-		t.Fatalf("Expected (2,0,2) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
-	}
-
-	klog.Infof("2 pod eviction expected per a descheduling cycle, 4 fake evictions in total")
-	if err := descheduler.runDeschedulerLoop(ctx); err != nil {
-		t.Fatalf("Unable to run a descheduling loop: %v", err)
-	}
-	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 4 {
-		t.Fatalf("Expected (2,0,4) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
-	}
+			for i, cycle := range tc.cycles {
+				if err := descheduler.runDeschedulerLoop(ctx); err != nil {
+					t.Fatalf("Cycle %d: Unable to run a descheduling loop: %v", i+1, err)
+				}
+				if descheduler.podEvictor.TotalEvicted() != cycle.expectedTotalEvicted || len(evictedPods) != cycle.expectedRealEvictions || len(fakeEvictedPods) != cycle.expectedFakeEvictions {
+					t.Fatalf("Cycle %d: Expected (%v,%v,%v) pods evicted, got (%v,%v,%v) instead", i+1, cycle.expectedTotalEvicted, cycle.expectedRealEvictions, cycle.expectedFakeEvictions, descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
+				}
+			}
+		})
+	}
 }
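
The cycle expectations encode the behavior under test: podEvictor.TotalEvicted() is expected to read 2 after every cycle, because the evictor's counter resets per descheduling loop, while the evictedPods and fakeEvictedPods slices filled in by reactors on the real and dry-run fake clients keep growing across cycles (2, then 4). podEvictionReactionTestingFnc is the test suite's helper for this; what its two nil arguments configure is not visible in this diff. The sketch below shows, with illustrative names, how such an eviction-recording reactor is commonly wired on a client-go fake clientset:

package demo

import (
	policyv1 "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/runtime"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

// recordEvictions intercepts eviction calls (a "create" on the
// pods/eviction subresource) and appends each evicted pod's name
// to the given slice.
func recordEvictions(client *fakeclientset.Clientset, evicted *[]string) {
	client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() != "eviction" {
			// Not an eviction; fall through to the default reactors.
			return false, nil, nil
		}
		createAction, ok := action.(clienttesting.CreateAction)
		if !ok {
			return false, nil, nil
		}
		if eviction, ok := createAction.GetObject().(*policyv1.Eviction); ok {
			*evicted = append(*evicted, eviction.GetName())
		}
		// Report the eviction as handled successfully.
		return true, nil, nil
	})
}

Returning true short-circuits the fake object tracker, so the eviction is counted without mutating the tracked pods, and len(*evicted) gives the number of eviction API calls the loop actually issued, which is what the table's expectedRealEvictions and expectedFakeEvictions columns assert against.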
@@ -594,7 +598,7 @@ func TestEvictionRequestsCache(t *testing.T) {
 	featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
 		features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
 	})
-	_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, node1, node2, p1, p2, p3, p4)
+	_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, false, node1, node2, p1, p2, p3, p4)
 	defer cancel()
 	var fakeEvictedPods []string
@@ -735,7 +739,7 @@ func TestDeschedulingLimits(t *testing.T) {
 	featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
 		features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
 	})
-	_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, node1, node2)
+	_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, false, node1, node2)
 	defer cancel()
 	var fakeEvictedPods []string
@@ -936,12 +940,9 @@ func TestNodeLabelSelectorBasedEviction(t *testing.T) {
 			}
 			ctxCancel, cancel := context.WithCancel(ctx)
-			rs, deschedulerInstance, client := initDescheduler(t, ctxCancel, initFeatureGates(), policy, nil, node1, node2, node3, node4, p1, p2, p3, p4)
+			_, deschedulerInstance, client := initDescheduler(t, ctxCancel, initFeatureGates(), policy, nil, tc.dryRun, node1, node2, node3, node4, p1, p2, p3, p4)
 			defer cancel()
 
-			// Set dry run mode if specified
-			rs.DryRun = tc.dryRun
-
 			// Verify all pods are created initially
 			pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
 			if err != nil {
@@ -1063,6 +1064,7 @@ func TestLoadAwareDescheduling(t *testing.T) {
 		initFeatureGates(),
 		policy,
 		metricsClientset,
+		false,
 		node1, node2, p1, p2, p3, p4, p5)
 	defer cancel()