Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)
Make sure dry runs see all the resources a normal run would (#1526)
* generic resource handling, so that dry run has all the expected resource types and objects
* simpler code and better names
* fix imports
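For context before the diff: previously the dry run copied a hard-coded set of pods, nodes, namespaces, and priority classes into a fake client (the cachedClient function removed below). The new informerResources helper instead records which GroupVersionResources the real run subscribes to and replays both the subscriptions and their cached objects into the fake client. The sketch below is illustrative only, not code from the commit: it assumes it lives in the same package so the unexported helpers (newInformerResources, Uses, CopyTo) are in scope, and the seeded node, the zero resync period, and the function name are made up for the example.

// A minimal sketch of the dry-run mirroring flow introduced by this commit.
// Assumes the informerResources helpers from the diff below are in scope.
package descheduler

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
)

func dryRunMirroringSketch() error {
	// Stand-in for the real cluster client: a fake seeded with one node.
	realClient := fakeclientset.NewSimpleClientset(
		&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}},
	)
	realFactory := informers.NewSharedInformerFactory(realClient, 0)

	// Declare every resource type the plugins rely on (same list as in the diff).
	ir := newInformerResources(realFactory)
	if err := ir.Uses(
		v1.SchemeGroupVersion.WithResource("pods"),
		v1.SchemeGroupVersion.WithResource("nodes"),
		v1.SchemeGroupVersion.WithResource("namespaces"),
		schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"),
	); err != nil {
		return err
	}

	// Start the "real" informers so their caches hold the cluster state.
	ctx := context.Background()
	realFactory.Start(ctx.Done())
	realFactory.WaitForCacheSync(ctx.Done())

	// Dry run: mirror the subscriptions and cached objects into a fresh fake client.
	fakeClient := fakeclientset.NewSimpleClientset()
	fakeFactory := informers.NewSharedInformerFactory(fakeClient, 0)
	if err := ir.CopyTo(fakeClient, fakeFactory); err != nil {
		return err
	}

	// The fake client now holds the same objects the real run would see.
	nodes, err := fakeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println("nodes visible to the dry run:", len(nodes.Items)) // 1
	return nil
}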
@@ -23,6 +23,9 @@ import (
     "strconv"
     "time"
 
+    schedulingv1 "k8s.io/api/scheduling/v1"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+
     "go.opentelemetry.io/otel/attribute"
     "go.opentelemetry.io/otel/trace"
     "k8s.io/apimachinery/pkg/api/meta"
@@ -34,15 +37,12 @@ import (
 
     v1 "k8s.io/api/core/v1"
     policy "k8s.io/api/policy/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
     utilversion "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     fakeclientset "k8s.io/client-go/kubernetes/fake"
-    listersv1 "k8s.io/client-go/listers/core/v1"
-    schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
     core "k8s.io/client-go/testing"
 
     "sigs.k8s.io/descheduler/pkg/descheduler/client"
@@ -71,10 +71,7 @@ type profileRunner struct {
 
 type descheduler struct {
     rs                    *options.DeschedulerServer
-    podLister             listersv1.PodLister
-    nodeLister            listersv1.NodeLister
-    namespaceLister       listersv1.NamespaceLister
-    priorityClassLister   schedulingv1.PriorityClassLister
+    ir                    *informerResources
     getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
     sharedInformerFactory informers.SharedInformerFactory
     deschedulerPolicy     *api.DeschedulerPolicy
@@ -83,12 +80,60 @@ type descheduler struct {
     podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
 }
 
+type informerResources struct {
+    sharedInformerFactory informers.SharedInformerFactory
+    resourceToInformer    map[schema.GroupVersionResource]informers.GenericInformer
+}
+
+func newInformerResources(sharedInformerFactory informers.SharedInformerFactory) *informerResources {
+    return &informerResources{
+        sharedInformerFactory: sharedInformerFactory,
+        resourceToInformer:    make(map[schema.GroupVersionResource]informers.GenericInformer),
+    }
+}
+
+func (ir *informerResources) Uses(resources ...schema.GroupVersionResource) error {
+    for _, resource := range resources {
+        informer, err := ir.sharedInformerFactory.ForResource(resource)
+        if err != nil {
+            return err
+        }
+
+        ir.resourceToInformer[resource] = informer
+    }
+    return nil
+}
+
+// CopyTo Copy informer subscriptions to the new factory and objects to the fake client so that the backing caches are populated for when listers are used.
+func (ir *informerResources) CopyTo(fakeClient *fakeclientset.Clientset, newFactory informers.SharedInformerFactory) error {
+    for resource, informer := range ir.resourceToInformer {
+        _, err := newFactory.ForResource(resource)
+        if err != nil {
+            return fmt.Errorf("error getting resource %s: %w", resource, err)
+        }
+
+        objects, err := informer.Lister().List(labels.Everything())
+        if err != nil {
+            return fmt.Errorf("error listing %s: %w", informer, err)
+        }
+
+        for _, object := range objects {
+            fakeClient.Tracker().Add(object)
+        }
+    }
+    return nil
+}
+
 func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
     podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
-    podLister := sharedInformerFactory.Core().V1().Pods().Lister()
-    nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
-    namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
-    priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
+    ir := newInformerResources(sharedInformerFactory)
+    ir.Uses(v1.SchemeGroupVersion.WithResource("pods"),
+        v1.SchemeGroupVersion.WithResource("nodes"),
+        // Future work could be to let each plugin declare what type of resources it needs; that way dry runs would stay
+        // consistent with the real runs without having to keep the list here in sync.
+        v1.SchemeGroupVersion.WithResource("namespaces"),                // Used by the defaultevictor plugin
+        schedulingv1.SchemeGroupVersion.WithResource("priorityclasses")) // Used by the defaultevictor plugin
 
     getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
     if err != nil {
@@ -109,10 +154,7 @@ func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.Desche
 
     return &descheduler{
         rs:                    rs,
-        podLister:             podLister,
-        nodeLister:            nodeLister,
-        namespaceLister:       namespaceLister,
-        priorityClassLister:   priorityClassLister,
+        ir:                    ir,
         getPodsAssignedToNode: getPodsAssignedToNode,
         sharedInformerFactory: sharedInformerFactory,
         deschedulerPolicy:     deschedulerPolicy,
@@ -146,13 +188,14 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
         fakeClient := fakeclientset.NewSimpleClientset()
         // simulate a pod eviction by deleting a pod
         fakeClient.PrependReactor("create", "pods", d.podEvictionReactionFnc(fakeClient))
-        err := cachedClient(d.rs.Client, fakeClient, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
+        fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+
+        err := d.ir.CopyTo(fakeClient, fakeSharedInformerFactory)
         if err != nil {
             return err
         }
 
         // create a new instance of the shared informer factor from the cached client
-        fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
         // register the pod informer, otherwise it will not get running
         d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
         if err != nil {
@@ -336,62 +379,6 @@ func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action cor
     }
 }
 
-func cachedClient(
-    realClient clientset.Interface,
-    fakeClient *fakeclientset.Clientset,
-    podLister listersv1.PodLister,
-    nodeLister listersv1.NodeLister,
-    namespaceLister listersv1.NamespaceLister,
-    priorityClassLister schedulingv1.PriorityClassLister,
-) error {
-    klog.V(3).Infof("Pulling resources for the cached client from the cluster")
-    pods, err := podLister.List(labels.Everything())
-    if err != nil {
-        return fmt.Errorf("unable to list pods: %v", err)
-    }
-
-    for _, item := range pods {
-        if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-            return fmt.Errorf("unable to copy pod: %v", err)
-        }
-    }
-
-    nodes, err := nodeLister.List(labels.Everything())
-    if err != nil {
-        return fmt.Errorf("unable to list nodes: %v", err)
-    }
-
-    for _, item := range nodes {
-        if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-            return fmt.Errorf("unable to copy node: %v", err)
-        }
-    }
-
-    namespaces, err := namespaceLister.List(labels.Everything())
-    if err != nil {
-        return fmt.Errorf("unable to list namespaces: %v", err)
-    }
-
-    for _, item := range namespaces {
-        if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-            return fmt.Errorf("unable to copy namespace: %v", err)
-        }
-    }
-
-    priorityClasses, err := priorityClassLister.List(labels.Everything())
-    if err != nil {
-        return fmt.Errorf("unable to list priorityclasses: %v", err)
-    }
-
-    for _, item := range priorityClasses {
-        if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-            return fmt.Errorf("unable to copy priorityclass: %v", err)
-        }
-    }
-
-    return nil
-}
-
 func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
     var span trace.Span
     ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
@@ -428,7 +415,8 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
         // A next context is created here intentionally to avoid nesting the spans via context.
         sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
         defer sSpan.End()
-        nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.nodeLister, nodeSelector)
+        nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.sharedInformerFactory.Core().V1().Nodes().Lister(), nodeSelector)
         if err != nil {
             sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
             klog.Error(err)