Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)
refactor(kubeClientSandbox): set the create pods reactor in buildSandbox
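Summary of the change: the podEvictionReactionFnc field moves from the descheduler struct into kubeClientSandbox, newKubeClientSandbox initializes it (apparently with the package-level reaction of the same name), and buildSandbox now registers the "create"/"pods" reactor on the fake clientset right after creating it. runDeschedulerLoop no longer keeps local fakeClient / fakeSharedInformerFactory variables or registers the reactor itself; it goes through the sandbox accessors instead. Tests that stub the reaction now assign to descheduler.kubeClientSandbox.podEvictionReactionFnc rather than descheduler.podEvictionReactionFnc.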
@@ -93,7 +93,6 @@ type descheduler struct {
     deschedulerPolicy                 *api.DeschedulerPolicy
     eventRecorder                     events.EventRecorder
     podEvictor                        *evictions.PodEvictor
-    podEvictionReactionFnc            func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
     metricsCollector                  *metricscollector.MetricsCollector
     prometheusClient                  promapi.Client
     previousPrometheusClientTransport *http.Transport
@@ -105,18 +104,20 @@ type descheduler struct {
 // kubeClientSandbox creates a sandbox environment with a fake client and informer factory
 // that mirrors resources from a real client, useful for dry-run testing scenarios
 type kubeClientSandbox struct {
     client                 clientset.Interface
     sharedInformerFactory  informers.SharedInformerFactory
     fakeKubeClient         *fakeclientset.Clientset
     fakeFactory            informers.SharedInformerFactory
     resourceToInformer     map[schema.GroupVersionResource]informers.GenericInformer
+    podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
 }

 func newKubeClientSandbox(client clientset.Interface, sharedInformerFactory informers.SharedInformerFactory, resources ...schema.GroupVersionResource) (*kubeClientSandbox, error) {
     sandbox := &kubeClientSandbox{
         client:                  client,
         sharedInformerFactory:   sharedInformerFactory,
         resourceToInformer:      make(map[schema.GroupVersionResource]informers.GenericInformer),
+        podEvictionReactionFnc:  podEvictionReactionFnc,
     }

     for _, resource := range resources {
@@ -132,6 +133,8 @@ func newKubeClientSandbox(client clientset.Interface, sharedInformerFactory info

 func (sandbox *kubeClientSandbox) buildSandbox() error {
     sandbox.fakeKubeClient = fakeclientset.NewSimpleClientset()
+    // simulate a pod eviction by deleting a pod
+    sandbox.fakeKubeClient.PrependReactor("create", "pods", sandbox.podEvictionReactionFnc(sandbox.fakeKubeClient))
     sandbox.fakeFactory = informers.NewSharedInformerFactory(sandbox.fakeKubeClient, 0)

     for resource, informer := range sandbox.resourceToInformer {
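For readers unfamiliar with why a reactor registered for ("create", "pods") can fake evictions: k8s.io/client-go's fake clientset reports an Evict call as a "create" action on the "pods" resource with the "eviction" subresource, so such a reactor can simulate the eviction by deleting the pod from the tracker. The snippet below is a minimal, self-contained sketch of that mechanism under those client-go fake semantics; it is illustrative only and is not the repository's podEvictionReactionFnc.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	fakeClient := fakeclientset.NewSimpleClientset(pod)

	// Evictions surface on the fake clientset as a "create" of the pods/eviction
	// subresource; this reactor turns such an action into a plain pod deletion.
	fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() != "eviction" {
			return false, nil, nil // not an eviction; fall through to other reactors
		}
		createAction, ok := action.(core.CreateAction)
		if !ok {
			return false, nil, nil
		}
		eviction, ok := createAction.GetObject().(*policyv1.Eviction)
		if !ok {
			return false, nil, nil
		}
		gvr := corev1.SchemeGroupVersion.WithResource("pods")
		if err := fakeClient.Tracker().Delete(gvr, eviction.GetNamespace(), eviction.GetName()); err != nil {
			return true, nil, err
		}
		return true, nil, nil
	})

	// Evicting the pod through the fake client now removes it from the tracker.
	_ = fakeClient.PolicyV1().Evictions("default").Evict(context.TODO(),
		&policyv1.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}})

	pods, _ := fakeClient.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	fmt.Println("pods remaining:", len(pods.Items)) // expect 0
}

In the refactored code, buildSandbox registers sandbox.podEvictionReactionFnc(sandbox.fakeKubeClient) in exactly this slot, so every sandbox comes with the eviction reaction pre-installed, and tests can swap in their own reaction by overriding the field, as the test hunks below do.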
@@ -214,17 +217,16 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
     }

     desch := &descheduler{
         rs:                      rs,
         kubeClientSandbox:       kubeClientSandbox,
         getPodsAssignedToNode:   getPodsAssignedToNode,
         sharedInformerFactory:   sharedInformerFactory,
         deschedulerPolicy:       deschedulerPolicy,
         eventRecorder:           eventRecorder,
         podEvictor:              podEvictor,
-        podEvictionReactionFnc:  podEvictionReactionFnc,
         prometheusClient:        rs.PrometheusClient,
         queue:                   workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: "descheduler"}),
         metricsProviders:        metricsProviderListToMap(deschedulerPolicy.MetricsProviders),
     }

     nodeSelector := labels.Everything()
@@ -392,15 +394,9 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context) error {
             return err
         }

-        fakeClient := d.kubeClientSandbox.fakeClient()
-        fakeSharedInformerFactory := d.kubeClientSandbox.fakeSharedInformerFactory()
-
-        // simulate a pod eviction by deleting a pod
-        fakeClient.PrependReactor("create", "pods", d.podEvictionReactionFnc(fakeClient))
-
         // create a new instance of the shared informer factor from the cached client
         // register the pod informer, otherwise it will not get running
-        d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
+        d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(d.kubeClientSandbox.fakeSharedInformerFactory().Core().V1().Pods().Informer())
         if err != nil {
             return fmt.Errorf("build get pods assigned to node function error: %v", err)
         }
@@ -417,17 +413,17 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context) error {
         // TODO(ingvagabund): register one indexer per each profile. Respect the precedence of no profile-level node selector is specified.
         // Also, keep a cache of node label selectors to detect duplicates to avoid creating an extra informer.

-        if err := nodeutil.AddNodeSelectorIndexer(fakeSharedInformerFactory.Core().V1().Nodes().Informer(), indexerNodeSelectorGlobal, nodeSelector); err != nil {
+        if err := nodeutil.AddNodeSelectorIndexer(d.kubeClientSandbox.fakeSharedInformerFactory().Core().V1().Nodes().Informer(), indexerNodeSelectorGlobal, nodeSelector); err != nil {
             return err
         }

         fakeCtx, cncl := context.WithCancel(context.TODO())
         defer cncl()
-        fakeSharedInformerFactory.Start(fakeCtx.Done())
-        fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
+        d.kubeClientSandbox.fakeSharedInformerFactory().Start(fakeCtx.Done())
+        d.kubeClientSandbox.fakeSharedInformerFactory().WaitForCacheSync(fakeCtx.Done())

-        client = fakeClient
-        d.sharedInformerFactory = fakeSharedInformerFactory
+        client = d.kubeClientSandbox.fakeClient()
+        d.sharedInformerFactory = d.kubeClientSandbox.fakeSharedInformerFactory()
     } else {
         client = d.rs.Client
     }
@@ -531,7 +531,7 @@ func TestPodEvictorReset(t *testing.T) {
     client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))

     var fakeEvictedPods []string
-    descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
+    descheduler.kubeClientSandbox.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
         return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
     }

@@ -602,7 +602,7 @@ func TestEvictionRequestsCache(t *testing.T) {
     defer cancel()

     var fakeEvictedPods []string
-    descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
+    descheduler.kubeClientSandbox.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
         return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
     }

@@ -743,7 +743,7 @@ func TestDeschedulingLimits(t *testing.T) {
     defer cancel()

     var fakeEvictedPods []string
-    descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
+    descheduler.kubeClientSandbox.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
         return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
     }

@@ -956,7 +956,7 @@ func TestNodeLabelSelectorBasedEviction(t *testing.T) {
             if !tc.dryRun {
                 client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
             } else {
-                deschedulerInstance.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
+                deschedulerInstance.kubeClientSandbox.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
                     return podEvictionReactionTestingFnc(&evictedPods, nil, nil)
                 }
             }