Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00
Merge pull request #1447 from ingvagabund/refactorings
PodEvictor: refactoring and preparation for eviction requests
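The shape of the change, as a minimal sketch assembled from the hunks below (an illustration, not a verbatim excerpt): instead of constructing a fresh PodEvictor inside every descheduling loop, newDescheduler now builds a single evictor once, with no client attached, and each loop re-points it at the current client and clears its counters before running the profiles.

	// Built once in newDescheduler (the client stays nil until a loop runs):
	podEvictor := evictions.NewPodEvictor(
		nil,
		evictionPolicyGroupVersion,
		rs.DryRun,
		deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
		deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
		!rs.DisableMetrics,
		eventRecorder,
	)

	// Reused in every runDeschedulerLoop iteration:
	d.podEvictor.SetClient(client)  // real client, or the seeded fake client in dry-run mode
	d.podEvictor.ResetCounters()    // per-node/per-namespace eviction counters start from zero
	d.runProfiles(ctx, client, nodes)
	klog.V(1).InfoS("Number of evicted pods", "totalEvicted", d.podEvictor.TotalEvicted())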
@@ -70,16 +70,17 @@ type profileRunner struct {
 }
 
 type descheduler struct {
-	rs                         *options.DeschedulerServer
-	podLister                  listersv1.PodLister
-	nodeLister                 listersv1.NodeLister
-	namespaceLister            listersv1.NamespaceLister
-	priorityClassLister        schedulingv1.PriorityClassLister
-	getPodsAssignedToNode      podutil.GetPodsAssignedToNodeFunc
-	sharedInformerFactory      informers.SharedInformerFactory
-	evictionPolicyGroupVersion string
-	deschedulerPolicy          *api.DeschedulerPolicy
-	eventRecorder              events.EventRecorder
+	rs                     *options.DeschedulerServer
+	podLister              listersv1.PodLister
+	nodeLister             listersv1.NodeLister
+	namespaceLister        listersv1.NamespaceLister
+	priorityClassLister    schedulingv1.PriorityClassLister
+	getPodsAssignedToNode  podutil.GetPodsAssignedToNodeFunc
+	sharedInformerFactory  informers.SharedInformerFactory
+	deschedulerPolicy      *api.DeschedulerPolicy
+	eventRecorder          events.EventRecorder
+	podEvictor             *evictions.PodEvictor
+	podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
 }
 
 func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
@@ -94,17 +95,28 @@ func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.Desche
 		return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
 	}
 
+	podEvictor := evictions.NewPodEvictor(
+		nil,
+		evictionPolicyGroupVersion,
+		rs.DryRun,
+		deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
+		deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
+		!rs.DisableMetrics,
+		eventRecorder,
+	)
+
 	return &descheduler{
-		rs:                         rs,
-		podLister:                  podLister,
-		nodeLister:                 nodeLister,
-		namespaceLister:            namespaceLister,
-		priorityClassLister:        priorityClassLister,
-		getPodsAssignedToNode:      getPodsAssignedToNode,
-		sharedInformerFactory:      sharedInformerFactory,
-		evictionPolicyGroupVersion: evictionPolicyGroupVersion,
-		deschedulerPolicy:          deschedulerPolicy,
-		eventRecorder:              eventRecorder,
+		rs:                     rs,
+		podLister:              podLister,
+		nodeLister:             nodeLister,
+		namespaceLister:        namespaceLister,
+		priorityClassLister:    priorityClassLister,
+		getPodsAssignedToNode:  getPodsAssignedToNode,
+		sharedInformerFactory:  sharedInformerFactory,
+		deschedulerPolicy:      deschedulerPolicy,
+		eventRecorder:          eventRecorder,
+		podEvictor:             podEvictor,
+		podEvictionReactionFnc: podEvictionReactionFnc,
 	}, nil
 }
 
@@ -129,7 +141,10 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
 	if d.rs.DryRun {
 		klog.V(3).Infof("Building a cached client from the cluster for the dry run")
 		// Create a new cache so we start from scratch without any leftovers
-		fakeClient, err := cachedClient(d.rs.Client, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
+		fakeClient := fakeclientset.NewSimpleClientset()
+		// simulate a pod eviction by deleting a pod
+		fakeClient.PrependReactor("create", "pods", d.podEvictionReactionFnc(fakeClient))
+		err := cachedClient(d.rs.Client, fakeClient, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
 		if err != nil {
 			return err
 		}
@@ -153,21 +168,13 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
 		client = d.rs.Client
 	}
 
-	klog.V(3).Infof("Building a pod evictor")
-	podEvictor := evictions.NewPodEvictor(
-		client,
-		d.evictionPolicyGroupVersion,
-		d.rs.DryRun,
-		d.deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
-		d.deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
-		nodes,
-		!d.rs.DisableMetrics,
-		d.eventRecorder,
-	)
+	klog.V(3).Infof("Setting up the pod evictor")
+	d.podEvictor.SetClient(client)
+	d.podEvictor.ResetCounters()
 
-	d.runProfiles(ctx, client, nodes, podEvictor)
+	d.runProfiles(ctx, client, nodes)
 
-	klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
+	klog.V(1).InfoS("Number of evicted pods", "totalEvicted", d.podEvictor.TotalEvicted())
 
 	return nil
 }
@@ -175,7 +182,7 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
 // runProfiles runs all the deschedule plugins of all profiles and
 // later runs through all balance plugins of all profiles. (All Balance plugins should come after all Deschedule plugins)
 // see https://github.com/kubernetes-sigs/descheduler/issues/979
-func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node) {
 	var span trace.Span
 	ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
 	defer span.End()
@@ -186,7 +193,7 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
 			pluginregistry.PluginRegistry,
 			frameworkprofile.WithClientSet(client),
 			frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
-			frameworkprofile.WithPodEvictor(podEvictor),
+			frameworkprofile.WithPodEvictor(d.podEvictor),
 			frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
 		)
 		if err != nil {
@@ -306,16 +313,8 @@ func validateVersionCompatibility(discovery discovery.DiscoveryInterface, versio
 	return nil
 }
 
-func cachedClient(
-	realClient clientset.Interface,
-	podLister listersv1.PodLister,
-	nodeLister listersv1.NodeLister,
-	namespaceLister listersv1.NamespaceLister,
-	priorityClassLister schedulingv1.PriorityClassLister,
-) (clientset.Interface, error) {
-	fakeClient := fakeclientset.NewSimpleClientset()
-	// simulate a pod eviction by deleting a pod
-	fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
+func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
+	return func(action core.Action) (bool, runtime.Object, error) {
 		if action.GetSubresource() == "eviction" {
 			createAct, matched := action.(core.CreateActionImpl)
 			if !matched {
@@ -332,54 +331,63 @@ func cachedClient(
 		}
 		// fallback to the default reactor
 		return false, nil, nil
-	})
+	}
+}
 
+func cachedClient(
+	realClient clientset.Interface,
+	fakeClient *fakeclientset.Clientset,
+	podLister listersv1.PodLister,
+	nodeLister listersv1.NodeLister,
+	namespaceLister listersv1.NamespaceLister,
+	priorityClassLister schedulingv1.PriorityClassLister,
+) error {
 	klog.V(3).Infof("Pulling resources for the cached client from the cluster")
 	pods, err := podLister.List(labels.Everything())
 	if err != nil {
-		return nil, fmt.Errorf("unable to list pods: %v", err)
+		return fmt.Errorf("unable to list pods: %v", err)
 	}
 
 	for _, item := range pods {
 		if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-			return nil, fmt.Errorf("unable to copy pod: %v", err)
+			return fmt.Errorf("unable to copy pod: %v", err)
 		}
 	}
 
 	nodes, err := nodeLister.List(labels.Everything())
 	if err != nil {
-		return nil, fmt.Errorf("unable to list nodes: %v", err)
+		return fmt.Errorf("unable to list nodes: %v", err)
 	}
 
 	for _, item := range nodes {
 		if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-			return nil, fmt.Errorf("unable to copy node: %v", err)
+			return fmt.Errorf("unable to copy node: %v", err)
 		}
 	}
 
 	namespaces, err := namespaceLister.List(labels.Everything())
 	if err != nil {
-		return nil, fmt.Errorf("unable to list namespaces: %v", err)
+		return fmt.Errorf("unable to list namespaces: %v", err)
 	}
 
 	for _, item := range namespaces {
 		if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-			return nil, fmt.Errorf("unable to copy namespace: %v", err)
+			return fmt.Errorf("unable to copy namespace: %v", err)
 		}
 	}
 
 	priorityClasses, err := priorityClassLister.List(labels.Everything())
 	if err != nil {
-		return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
+		return fmt.Errorf("unable to list priorityclasses: %v", err)
 	}
 
 	for _, item := range priorityClasses {
 		if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-			return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
+			return fmt.Errorf("unable to copy priorityclass: %v", err)
 		}
 	}
 
-	return fakeClient, nil
+	return nil
 }
 
 func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
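The diff above elides the middle of the eviction reactor, so here is a self-contained sketch of the pattern podEvictionReactionFnc relies on; it is an illustration under stated assumptions, not the package's exact code. A reactor prepended for "create" on "pods" intercepts the "eviction" subresource and deletes the pod from the fake client's object tracker, so a dry-run loop observes the same state change a real eviction would cause. The tracker is used directly because the fake clientset holds its action lock while reactors run.

	package main

	import (
		"context"
		"fmt"

		v1 "k8s.io/api/core/v1"
		policyv1 "k8s.io/api/policy/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/runtime"
		fakeclientset "k8s.io/client-go/kubernetes/fake"
		core "k8s.io/client-go/testing"
	)

	// evictionReaction turns "create pods/eviction" actions into deletions on the
	// fake client's object tracker. Illustrative only; error handling is minimal.
	func evictionReaction(fakeClient *fakeclientset.Clientset) func(core.Action) (bool, runtime.Object, error) {
		podsGVR := v1.SchemeGroupVersion.WithResource("pods")
		return func(action core.Action) (bool, runtime.Object, error) {
			if action.GetSubresource() != "eviction" {
				return false, nil, nil // fall back to the default reactor
			}
			createAct, matched := action.(core.CreateActionImpl)
			if !matched {
				return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
			}
			eviction, matched := createAct.Object.(*policyv1.Eviction)
			if !matched {
				return false, nil, nil
			}
			if err := fakeClient.Tracker().Delete(podsGVR, action.GetNamespace(), eviction.GetName()); err != nil {
				return true, nil, fmt.Errorf("unable to delete pod %v/%v: %v", action.GetNamespace(), eviction.GetName(), err)
			}
			return true, nil, nil
		}
	}

	func main() {
		pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "dev"}}
		fakeClient := fakeclientset.NewSimpleClientset(pod)
		fakeClient.PrependReactor("create", "pods", evictionReaction(fakeClient))

		// "Evicting" the pod now removes it from the fake tracker.
		eviction := &policyv1.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "dev"}}
		if err := fakeClient.CoreV1().Pods("dev").EvictV1(context.TODO(), eviction); err != nil {
			panic(err)
		}
		pods, _ := fakeClient.CoreV1().Pods("dev").List(context.TODO(), metav1.ListOptions{})
		fmt.Println("pods remaining:", len(pods.Items)) // prints 0
	}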
@@ -13,15 +13,18 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	apiversion "k8s.io/apimachinery/pkg/version"
 	fakediscovery "k8s.io/client-go/discovery/fake"
+	"k8s.io/client-go/informers"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
+	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
+	"sigs.k8s.io/descheduler/pkg/utils"
 	deschedulerversion "sigs.k8s.io/descheduler/pkg/version"
 	"sigs.k8s.io/descheduler/test"
 )
@@ -95,7 +98,7 @@ func TestTaintsUpdated(t *testing.T) {
 	}
 
 	var evictedPods []string
-	client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
+	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
 
 	internalDeschedulerPolicy := &api.DeschedulerPolicy{}
 	scope := scope{}
@@ -161,7 +164,7 @@ func TestDuplicate(t *testing.T) {
 	}
 
 	var evictedPods []string
-	client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
+	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
 
 	internalDeschedulerPolicy := &api.DeschedulerPolicy{}
 	scope := scope{}
@@ -174,7 +177,7 @@ func TestDuplicate(t *testing.T) {
 	}
 
 	if len(evictedPods) == 0 {
-		t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
+		t.Fatalf("Unable to evict pods\n")
 	}
 }
 
@@ -309,7 +312,7 @@ func TestValidateVersionCompatibility(t *testing.T) {
 	}
 }
 
-func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
+func podEvictionReactionTestingFnc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
 	return func(action core.Action) (bool, runtime.Object, error) {
 		if action.GetSubresource() == "eviction" {
 			createAct, matched := action.(core.CreateActionImpl)
@@ -323,3 +326,121 @@ func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (boo
 		return false, nil, nil // fallback to the default reactor
 	}
 }
+
+func initPluginRegistry() {
+	pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
+	pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
+	pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
+}
+
+func TestPodEvictorReset(t *testing.T) {
+	initPluginRegistry()
+
+	ctx := context.Background()
+	node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
+	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
+
+	p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
+	p1.Namespace = "dev"
+	p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
+	p2.Namespace = "dev"
+	p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
+	p3.Namespace = "dev"
+	p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
+	p4.Namespace = "dev"
+
+	ownerRef1 := test.GetReplicaSetOwnerRefList()
+	p1.ObjectMeta.OwnerReferences = ownerRef1
+	p2.ObjectMeta.OwnerReferences = ownerRef1
+	p3.ObjectMeta.OwnerReferences = ownerRef1
+	p4.ObjectMeta.OwnerReferences = ownerRef1
+
+	client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3, p4)
+	eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3, p4)
+	dp := &v1alpha1.DeschedulerPolicy{
+		Strategies: v1alpha1.StrategyList{
+			"RemoveDuplicates": v1alpha1.DeschedulerStrategy{
+				Enabled: true,
+			},
+		},
+	}
+
+	internalDeschedulerPolicy := &api.DeschedulerPolicy{}
+	scope := scope{}
+	err := v1alpha1.V1alpha1ToInternal(dp, pluginregistry.PluginRegistry, internalDeschedulerPolicy, scope)
+	if err != nil {
+		t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
+	}
+
+	rs, err := options.NewDeschedulerServer()
+	if err != nil {
+		t.Fatalf("Unable to initialize server: %v", err)
+	}
+	rs.Client = client
+	rs.EventClient = eventClient
+
+	var evictedPods []string
+	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
+
+	sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
+	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
+	defer eventBroadcaster.Shutdown()
+
+	descheduler, err := newDescheduler(rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory)
+	if err != nil {
+		t.Fatalf("Unable to create a descheduler instance: %v", err)
+	}
+	var fakeEvictedPods []string
+	descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
+		return podEvictionReactionTestingFnc(&fakeEvictedPods)
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	sharedInformerFactory.Start(ctx.Done())
+	sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+	nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, descheduler.nodeLister, "")
+	if err != nil {
+		t.Fatalf("Unable to get ready nodes: %v", err)
+	}
+
+	// a single pod eviction expected
+	err = descheduler.runDeschedulerLoop(ctx, nodes)
+	if err != nil {
+		t.Fatalf("Unable to run a descheduling loop: %v", err)
+	}
+	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 || len(fakeEvictedPods) != 0 {
+		t.Fatalf("Expected (2,2,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
+	}
+
+	// a single pod eviction expected
+	err = descheduler.runDeschedulerLoop(ctx, nodes)
+	if err != nil {
+		t.Fatalf("Unable to run a descheduling loop: %v", err)
+	}
+	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 || len(fakeEvictedPods) != 0 {
+		t.Fatalf("Expected (2,4,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
+	}
+
+	// check the fake client syncing and the right pods evicted
+	rs.DryRun = true
+	evictedPods = []string{}
+	// a single pod eviction expected
+	err = descheduler.runDeschedulerLoop(ctx, nodes)
+	if err != nil {
+		t.Fatalf("Unable to run a descheduling loop: %v", err)
+	}
+	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 2 {
+		t.Fatalf("Expected (2,0,2) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
+	}
+	// a single pod eviction expected
+	err = descheduler.runDeschedulerLoop(ctx, nodes)
+	if err != nil {
+		t.Fatalf("Unable to run a descheduling loop: %v", err)
+	}
+	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 4 {
+		t.Fatalf("Expected (2,0,4) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
+	}
+}
@@ -43,7 +43,6 @@ type (
 
 type PodEvictor struct {
 	client                     clientset.Interface
-	nodes                      []*v1.Node
 	policyGroupVersion         string
 	dryRun                     bool
 	maxPodsToEvictPerNode      *uint
@@ -60,26 +59,17 @@ func NewPodEvictor(
 	dryRun bool,
 	maxPodsToEvictPerNode *uint,
 	maxPodsToEvictPerNamespace *uint,
-	nodes []*v1.Node,
 	metricsEnabled bool,
 	eventRecorder events.EventRecorder,
 ) *PodEvictor {
-	nodePodCount := make(nodePodEvictedCount)
-	namespacePodCount := make(namespacePodEvictCount)
-	for _, node := range nodes {
-		// Initialize podsEvicted till now with 0.
-		nodePodCount[node.Name] = 0
-	}
-
 	return &PodEvictor{
 		client:                     client,
-		nodes:                      nodes,
 		policyGroupVersion:         policyGroupVersion,
 		dryRun:                     dryRun,
 		maxPodsToEvictPerNode:      maxPodsToEvictPerNode,
 		maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
-		nodepodCount:               nodePodCount,
-		namespacePodCount:          namespacePodCount,
+		nodepodCount:               make(nodePodEvictedCount),
+		namespacePodCount:          make(namespacePodEvictCount),
		metricsEnabled:             metricsEnabled,
 		eventRecorder:              eventRecorder,
 	}
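With the nodes parameter gone, callers construct the evictor from configuration alone; the per-node counters are plain maps that fill in lazily as evictions happen and can be wiped via the ResetCounters helper added just below. A typical call after this change, mirroring the updated tests later in the diff (argument values here are illustrative):

	podEvictor := evictions.NewPodEvictor(
		client,        // clientset.Interface; may be nil and set later via the SetClient helper added below
		"policy/v1",   // eviction policy group/version
		false,         // dryRun
		nil,           // maxPodsToEvictPerNode (*uint, nil means no limit)
		nil,           // maxPodsToEvictPerNamespace (*uint)
		true,          // metricsEnabled
		eventRecorder, // events.EventRecorder
	)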
@@ -107,6 +97,15 @@ func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
 	return false
 }
 
+func (pe *PodEvictor) ResetCounters() {
+	pe.nodepodCount = make(nodePodEvictedCount)
+	pe.namespacePodCount = make(namespacePodEvictCount)
+}
+
+func (pe *PodEvictor) SetClient(client clientset.Interface) {
+	pe.client = client
+}
+
 // EvictOptions provides a handle for passing additional info to EvictPod
 type EvictOptions struct {
 	// Reason allows for passing details about the specific eviction for logging.
@@ -22,9 +22,12 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
+	"k8s.io/client-go/tools/events"
+	utilpointer "k8s.io/utils/pointer"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/test"
@@ -113,3 +116,52 @@ func TestPodTypes(t *testing.T) {
 		t.Errorf("Expected p1 to be a normal pod.")
 	}
 }
+
+func TestNewPodEvictor(t *testing.T) {
+	pod1 := test.BuildTestPod("pod", 400, 0, "node", nil)
+
+	fakeClient := fake.NewSimpleClientset(pod1)
+
+	eventRecorder := &events.FakeRecorder{}
+
+	podEvictor := NewPodEvictor(
+		fakeClient,
+		"policy/v1",
+		false,
+		utilpointer.Uint(1),
+		nil,
+		false,
+		eventRecorder,
+	)
+
+	stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}
+
+	// 0 evictions expected
+	if evictions := podEvictor.NodeEvicted(stubNode); evictions != 0 {
+		t.Errorf("Expected 0 node evictions, got %q instead", evictions)
+	}
+	// 0 evictions expected
+	if evictions := podEvictor.TotalEvicted(); evictions != 0 {
+		t.Errorf("Expected 0 total evictions, got %q instead", evictions)
+	}
+
+	if podEvictor.NodeLimitExceeded(stubNode) {
+		t.Errorf("Expected NodeLimitExceeded to return false, got true instead")
+	}
+
+	if !podEvictor.EvictPod(context.TODO(), pod1, EvictOptions{}) {
+		t.Errorf("Expected a pod eviction, got no eviction instead")
+	}
+
+	// 1 node eviction expected
+	if evictions := podEvictor.NodeEvicted(stubNode); evictions != 1 {
+		t.Errorf("Expected 1 node eviction, got %q instead", evictions)
+	}
+	// 1 total eviction expected
+	if evictions := podEvictor.TotalEvicted(); evictions != 1 {
+		t.Errorf("Expected 1 total evictions, got %q instead", evictions)
+	}
+	if !podEvictor.NodeLimitExceeded(stubNode) {
+		t.Errorf("Expected NodeLimitExceeded to return true, got false instead")
+	}
+}
@@ -492,7 +492,6 @@ func TestHighNodeUtilization(t *testing.T) {
 				false,
 				nil,
 				nil,
-				testCase.nodes,
 				false,
 				eventRecorder,
 			)
@@ -644,7 +643,6 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 				false,
 				&item.evictionsExpected,
 				nil,
-				item.nodes,
 				false,
 				eventRecorder,
 			)
@@ -909,7 +909,6 @@ func TestLowNodeUtilization(t *testing.T) {
 				false,
 				nil,
 				nil,
-				test.nodes,
 				false,
 				eventRecorder,
 			)
@@ -1081,7 +1080,6 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 				false,
 				&item.evictionsExpected,
 				nil,
-				item.nodes,
 				false,
 				eventRecorder,
 			)
@@ -562,7 +562,6 @@ func TestPodLifeTime(t *testing.T) {
 				false,
 				tc.maxPodsToEvictPerNode,
 				tc.maxPodsToEvictPerNamespace,
-				tc.nodes,
 				false,
 				eventRecorder,
 			)
@@ -319,7 +319,6 @@ func TestFindDuplicatePods(t *testing.T) {
 				false,
 				nil,
 				nil,
-				testCase.nodes,
 				false,
 				eventRecorder,
 			)
@@ -768,7 +767,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				false,
 				nil,
 				nil,
-				testCase.nodes,
 				false,
 				eventRecorder,
 			)
@@ -381,7 +381,6 @@ func TestRemoveFailedPods(t *testing.T) {
 				false,
 				nil,
 				nil,
-				tc.nodes,
 				false,
 				eventRecorder,
 			)
@@ -350,7 +350,6 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 				false,
 				tc.maxPodsToEvictPerNode,
 				tc.maxNoOfPodsToEvictPerNamespace,
-				tc.nodes,
 				false,
 				eventRecorder,
 			)
@@ -239,7 +239,6 @@ func TestPodAntiAffinity(t *testing.T) {
 				false,
 				test.maxPodsToEvictPerNode,
 				test.maxNoOfPodsToEvictPerNamespace,
-				test.nodes,
 				false,
 				eventRecorder,
 			)
@@ -365,7 +365,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 				false,
 				tc.maxPodsToEvictPerNode,
 				tc.maxNoOfPodsToEvictPerNamespace,
-				tc.nodes,
 				false,
 				eventRecorder,
 			)
@@ -410,7 +410,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 				false,
 				tc.maxPodsToEvictPerNode,
 				tc.maxNoOfPodsToEvictPerNamespace,
-				tc.nodes,
 				false,
 				eventRecorder,
 			)
@@ -1462,7 +1462,6 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				false,
 				nil,
 				nil,
-				tc.nodes,
 				false,
 				eventRecorder,
 			)
@@ -244,7 +244,7 @@ func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
 	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
 	defer eventBroadcaster.Shutdown()
 
-	podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, nodes, true, eventRecorder)
+	podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, true, eventRecorder)
 
 	prfl, err := NewProfile(
 		test.config,
@@ -306,7 +306,6 @@ func TestProfileExtensionPoints(t *testing.T) {
 
 	n1 := testutils.BuildTestNode("n1", 2000, 3000, 10, nil)
 	n2 := testutils.BuildTestNode("n2", 2000, 3000, 10, nil)
-	nodes := []*v1.Node{n1, n2}
 
 	p1 := testutils.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
 	p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{}}
@@ -393,7 +392,7 @@ func TestProfileExtensionPoints(t *testing.T) {
 	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
 	defer eventBroadcaster.Shutdown()
 
-	podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, nodes, true, eventRecorder)
+	podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, true, eventRecorder)
 
 	prfl, err := NewProfile(
 		api.DeschedulerProfile{
@@ -605,7 +604,7 @@ func TestProfileExtensionPointOrdering(t *testing.T) {
 	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
 	defer eventBroadcaster.Shutdown()
 
-	podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, nodes, true, eventRecorder)
+	podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, true, eventRecorder)
 
 	prfl, err := NewProfile(
 		api.DeschedulerProfile{
@@ -48,7 +48,7 @@ func TestRemoveDuplicates(t *testing.T) {
 		t.Errorf("Error listing node with %v", err)
 	}
 
-	nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
+	_, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
 
 	t.Log("Creating testing namespace")
 	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -177,7 +177,6 @@ func TestRemoveDuplicates(t *testing.T) {
 		false,
 		nil,
 		nil,
-		nodes,
 		false,
 		eventRecorder,
 	)
@@ -75,7 +75,7 @@ func TestFailedPods(t *testing.T) {
 	defer jobClient.Delete(ctx, job.Name, metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy})
 	waitForJobPodPhase(ctx, t, clientSet, job, v1.PodFailed)
 
-	podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
+	podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode)
 
 	defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
 		EvictLocalStoragePods: true,
@@ -196,7 +196,6 @@ func runPodLifetimePlugin(
 		false,
 		nil,
 		maxPodsToEvictPerNamespace,
-		nodes,
 		false,
 		&events.FakeRecorder{},
 	)
@@ -285,7 +284,7 @@ func TestLowNodeUtilization(t *testing.T) {
 		t.Errorf("Error listing node with %v", err)
 	}
 
-	nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
+	_, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
 
 	t.Log("Creating testing namespace")
 	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -374,7 +373,7 @@ func TestLowNodeUtilization(t *testing.T) {
 	waitForRCPodsRunning(ctx, t, clientSet, rc)
 
 	// Run LowNodeUtilization plugin
-	podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
+	podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode)
 
 	defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
 		EvictLocalStoragePods: true,
@@ -1570,7 +1569,7 @@ func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
 	return allNodes, workerNodes
 }
 
-func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, nodes []*v1.Node) *evictions.PodEvictor {
+func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) *evictions.PodEvictor {
 	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
 	if err != nil || len(evictionPolicyGroupVersion) == 0 {
 		t.Fatalf("Error creating eviction policy group: %v", err)
@@ -1584,7 +1583,6 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAs
 		false,
 		nil,
 		nil,
-		nodes,
 		false,
 		eventRecorder,
 	)
@@ -50,7 +50,7 @@ func TestTooManyRestarts(t *testing.T) {
 		t.Errorf("Error listing node with %v", err)
 	}
 
-	nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
+	_, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
 
 	t.Logf("Creating testing namespace %v", t.Name())
 	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -167,7 +167,6 @@ func TestTooManyRestarts(t *testing.T) {
 		false,
 		nil,
 		nil,
-		nodes,
 		false,
 		eventRecorder,
 	)
@@ -27,7 +27,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 	if err != nil {
 		t.Errorf("Error listing node with %v", err)
 	}
-	nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
+	_, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
 	t.Log("Creating testing namespace")
 	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
 	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
@@ -138,7 +138,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 	defer test.DeleteDeployment(ctx, t, clientSet, violatorDeployment)
 	test.WaitForDeploymentPodsRunning(ctx, t, clientSet, violatorDeployment)
 
-	podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
+	podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode)
 
 	// Run TopologySpreadConstraint strategy
 	t.Logf("Running RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)