Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00
chore(pkg/descheduler): make TestPodEvictorReset table driven
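
This change converts TestPodEvictorReset from one long sequence of inline assertions into Go's table-driven subtest idiom: the "real mode" and "dry mode" scenarios become rows in a slice of structs executed through t.Run, and initDescheduler gains a dryRun parameter so each case picks the eviction mode up front instead of flipping rs.DryRun mid-test. The snippet below is only a minimal, self-contained sketch of that idiom; the function and values in it are hypothetical placeholders, not code from this repository.

package example

import "testing"

// countEvictions stands in for one descheduling cycle in this sketch: it
// reports how many real and how many dry-run ("fake") evictions happened.
func countEvictions(dryRun bool) (realEvicted, fakeEvicted int) {
	if dryRun {
		return 0, 2
	}
	return 2, 0
}

func TestCountEvictionsTableDriven(t *testing.T) {
	// Each case is one row of the table; adding a scenario means adding a row.
	tests := []struct {
		name         string
		dryRun       bool
		expectedReal int
		expectedFake int
	}{
		{name: "real mode", dryRun: false, expectedReal: 2, expectedFake: 0},
		{name: "dry mode", dryRun: true, expectedReal: 0, expectedFake: 2},
	}

	for _, tc := range tests {
		// t.Run gives every row its own isolated, individually reported subtest.
		t.Run(tc.name, func(t *testing.T) {
			realEvicted, fakeEvicted := countEvictions(tc.dryRun)
			if realEvicted != tc.expectedReal || fakeEvicted != tc.expectedFake {
				t.Fatalf("expected (%d,%d) evictions, got (%d,%d)", tc.expectedReal, tc.expectedFake, realEvicted, fakeEvicted)
			}
		})
	}
}
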
@@ -177,7 +177,7 @@ func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThreshold
 	}
 }
 
-func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
+func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, dryRun bool, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
 	client := fakeclientset.NewSimpleClientset(objects...)
 	eventClient := fakeclientset.NewSimpleClientset(objects...)
 
@@ -189,6 +189,7 @@ func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate
 	rs.EventClient = eventClient
 	rs.DefaultFeatureGates = featureGates
 	rs.MetricsClient = metricsClient
+	rs.DryRun = dryRun
 
 	sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
 	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
@@ -477,69 +478,72 @@ func taintNodeNoSchedule(node *v1.Node) {
 func TestPodEvictorReset(t *testing.T) {
 	initPluginRegistry()
 
-	ctx := context.Background()
-	node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
-	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
-
-	ownerRef1 := test.GetReplicaSetOwnerRefList()
-	updatePod := func(pod *v1.Pod) {
-		pod.Namespace = "dev"
-		pod.ObjectMeta.OwnerReferences = ownerRef1
-	}
-
-	p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePod)
-	p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePod)
-
-	internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
-	ctxCancel, cancel := context.WithCancel(ctx)
-	rs, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, node1, node2, p1, p2)
-	defer cancel()
-
-	var evictedPods []string
-	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
-
-	var fakeEvictedPods []string
-	descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
-		return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
-	}
-
-	// a single pod eviction expected
-	klog.Infof("2 pod eviction expected per a descheduling cycle, 2 real evictions in total")
-	if err := descheduler.runDeschedulerLoop(ctx); err != nil {
-		t.Fatalf("Unable to run a descheduling loop: %v", err)
-	}
-	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 || len(fakeEvictedPods) != 0 {
-		t.Fatalf("Expected (2,2,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
-	}
-
-	// a single pod eviction expected
-	klog.Infof("2 pod eviction expected per a descheduling cycle, 4 real evictions in total")
-	if err := descheduler.runDeschedulerLoop(ctx); err != nil {
-		t.Fatalf("Unable to run a descheduling loop: %v", err)
-	}
-	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 || len(fakeEvictedPods) != 0 {
-		t.Fatalf("Expected (2,4,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
-	}
-
-	// check the fake client syncing and the right pods evicted
-	klog.Infof("Enabling the dry run mode")
-	rs.DryRun = true
-	evictedPods = []string{}
-
-	klog.Infof("2 pod eviction expected per a descheduling cycle, 2 fake evictions in total")
-	if err := descheduler.runDeschedulerLoop(ctx); err != nil {
-		t.Fatalf("Unable to run a descheduling loop: %v", err)
-	}
-	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 2 {
-		t.Fatalf("Expected (2,0,2) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
-	}
-
-	klog.Infof("2 pod eviction expected per a descheduling cycle, 4 fake evictions in total")
-	if err := descheduler.runDeschedulerLoop(ctx); err != nil {
-		t.Fatalf("Unable to run a descheduling loop: %v", err)
-	}
-	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 4 {
-		t.Fatalf("Expected (2,0,4) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
-	}
+	tests := []struct {
+		name   string
+		dryRun bool
+		cycles []struct {
+			expectedTotalEvicted  uint
+			expectedRealEvictions int
+			expectedFakeEvictions int
+		}
+	}{
+		{
+			name:   "real mode",
+			dryRun: false,
+			cycles: []struct {
+				expectedTotalEvicted  uint
+				expectedRealEvictions int
+				expectedFakeEvictions int
+			}{
+				{expectedTotalEvicted: 2, expectedRealEvictions: 2, expectedFakeEvictions: 0},
+				{expectedTotalEvicted: 2, expectedRealEvictions: 4, expectedFakeEvictions: 0},
+			},
+		},
+		{
+			name:   "dry mode",
+			dryRun: true,
+			cycles: []struct {
+				expectedTotalEvicted  uint
+				expectedRealEvictions int
+				expectedFakeEvictions int
+			}{
+				{expectedTotalEvicted: 2, expectedRealEvictions: 0, expectedFakeEvictions: 2},
+				{expectedTotalEvicted: 2, expectedRealEvictions: 0, expectedFakeEvictions: 4},
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx := context.Background()
+			node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
+			node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
+
+			p1 := test.BuildTestPod("p1", 100, 0, node1.Name, test.SetRSOwnerRef)
+			p2 := test.BuildTestPod("p2", 100, 0, node1.Name, test.SetRSOwnerRef)
+
+			internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
+			ctxCancel, cancel := context.WithCancel(ctx)
+			_, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, tc.dryRun, node1, node2, p1, p2)
+			defer cancel()
+
+			var evictedPods []string
+			client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
+
+			var fakeEvictedPods []string
+			descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
+				return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
+			}
+
+			for i, cycle := range tc.cycles {
+				if err := descheduler.runDeschedulerLoop(ctx); err != nil {
+					t.Fatalf("Cycle %d: Unable to run a descheduling loop: %v", i+1, err)
+				}
+				if descheduler.podEvictor.TotalEvicted() != cycle.expectedTotalEvicted || len(evictedPods) != cycle.expectedRealEvictions || len(fakeEvictedPods) != cycle.expectedFakeEvictions {
+					t.Fatalf("Cycle %d: Expected (%v,%v,%v) pods evicted, got (%v,%v,%v) instead", i+1, cycle.expectedTotalEvicted, cycle.expectedRealEvictions, cycle.expectedFakeEvictions, descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
				}
			}
		})
 	}
 }
 
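
Both the old and the new version of the test count "real" evictions by prepending a reactor for create actions on pods to the fake clientset (evictions reach a client-go fake client as creates of the pods/eviction subresource), while dry-run evictions are routed through descheduler.podEvictionReactionFnc into a separate slice. The sketch below isolates the first half of that mechanism with plain client-go fakes; it is an illustrative stand-in, not the repository's podEvictionReactionTestingFnc helper.

package example

import (
	"context"
	"testing"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func TestRecordEvictionsWithFakeClient(t *testing.T) {
	client := fake.NewSimpleClientset()

	var evictedPods []string
	// Evictions arrive as "create" actions on the "pods" resource (eviction
	// subresource), so this reactor sees every eviction issued by the code
	// under test. Returning true marks the action as handled, keeping the
	// fake's default object tracker out of the eviction path.
	client.PrependReactor("create", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) {
		if createAction, ok := action.(k8stesting.CreateAction); ok {
			if eviction, ok := createAction.GetObject().(*policyv1.Eviction); ok {
				evictedPods = append(evictedPods, eviction.Name)
			}
		}
		return true, nil, nil
	})

	// Issue a single eviction against the fake clientset and check it was recorded.
	err := client.PolicyV1().Evictions("default").Evict(context.TODO(), &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"},
	})
	if err != nil {
		t.Fatalf("unexpected eviction error: %v", err)
	}
	if len(evictedPods) != 1 || evictedPods[0] != "p1" {
		t.Fatalf("expected to record eviction of p1, got %v", evictedPods)
	}
}
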
@@ -594,7 +598,7 @@ func TestEvictionRequestsCache(t *testing.T) {
 	featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
 		features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
 	})
-	_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, node1, node2, p1, p2, p3, p4)
+	_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, false, node1, node2, p1, p2, p3, p4)
 	defer cancel()
 
 	var fakeEvictedPods []string
@@ -735,7 +739,7 @@ func TestDeschedulingLimits(t *testing.T) {
 			featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
 				features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
 			})
-			_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, node1, node2)
+			_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, false, node1, node2)
 			defer cancel()
 
 			var fakeEvictedPods []string
@@ -936,12 +940,9 @@ func TestNodeLabelSelectorBasedEviction(t *testing.T) {
 			}
 
 			ctxCancel, cancel := context.WithCancel(ctx)
-			rs, deschedulerInstance, client := initDescheduler(t, ctxCancel, initFeatureGates(), policy, nil, node1, node2, node3, node4, p1, p2, p3, p4)
+			_, deschedulerInstance, client := initDescheduler(t, ctxCancel, initFeatureGates(), policy, nil, tc.dryRun, node1, node2, node3, node4, p1, p2, p3, p4)
 			defer cancel()
 
-			// Set dry run mode if specified
-			rs.DryRun = tc.dryRun
-
 			// Verify all pods are created initially
 			pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
 			if err != nil {
@@ -1063,6 +1064,7 @@ func TestLoadAwareDescheduling(t *testing.T) {
 		initFeatureGates(),
 		policy,
 		metricsClientset,
+		false,
 		node1, node2, p1, p2, p3, p4, p5)
 	defer cancel()
 