Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00
fix: indexer cache error when default evictor is re-initialized
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
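For background on the bug: client-go's shared pod informer keeps a single indexer cache, and registering an index name that is already present in that cache returns an error, so constructing the DefaultEvictor plugin a second time against the same SharedInformerFactory failed before this change. The standalone sketch below is illustrative only (ensurePodIndexer and indexOwnerUIDs are hypothetical names, not descheduler or client-go APIs); it shows the same check-before-AddIndexers guard this commit applies.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/client-go/tools/cache"
)

// ensurePodIndexer registers indexFn under indexName exactly once.
// Calling AddIndexers twice with the same name would return an error,
// which is roughly what a re-initialized plugin ran into.
func ensurePodIndexer(informer cache.SharedIndexInformer, indexName string, indexFn cache.IndexFunc) (cache.Indexer, error) {
    indexer := informer.GetIndexer()
    if _, exists := indexer.GetIndexers()[indexName]; exists {
        return indexer, nil // already registered; nothing to do
    }
    if err := informer.AddIndexers(cache.Indexers{indexName: indexFn}); err != nil {
        return nil, err
    }
    return informer.GetIndexer(), nil
}

func main() {
    factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)
    podInformer := factory.Core().V1().Pods().Informer()

    // Hypothetical index function: keys each pod by its owners' UIDs.
    indexOwnerUIDs := func(obj interface{}) ([]string, error) {
        pod, ok := obj.(*v1.Pod)
        if !ok {
            return nil, fmt.Errorf("expected *v1.Pod, got %T", obj)
        }
        keys := make([]string, 0, len(pod.OwnerReferences))
        for _, ref := range pod.OwnerReferences {
            keys = append(keys, string(ref.UID))
        }
        return keys, nil
    }

    // The second call is a no-op instead of failing on a duplicate index name.
    for i := 0; i < 2; i++ {
        if _, err := ensurePodIndexer(podInformer, "byOwnerUID", indexOwnerUIDs); err != nil {
            fmt.Println("register:", err)
            return
        }
    }
    fmt.Println("indexer registered once, safe to re-initialize")
}

With the guard in place, re-registration becomes a no-op, which is what the new TestReinitialization below verifies by calling New a second time with the same args and handle.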
@@ -244,6 +244,15 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
func getPodIndexerByOwnerRefs(indexName string, handle frameworktypes.Handle) (cache.Indexer, error) {
    podInformer := handle.SharedInformerFactory().Core().V1().Pods().Informer()
    indexer := podInformer.GetIndexer()

    // do not reinitialize the indexer, if it's been defined already
    for name := range indexer.GetIndexers() {
        if name == indexName {
            return indexer, nil
        }
    }

    if err := podInformer.AddIndexers(cache.Indexers{
        indexName: func(obj interface{}) ([]string, error) {
            pod, ok := obj.(*v1.Pod)
@@ -257,6 +266,5 @@ func getPodIndexerByOwnerRefs(indexName string, handle frameworktypes.Handle) (cache.Indexer, error) {
        return nil, err
    }

    indexer := podInformer.GetIndexer()
    return indexer, nil
}

@@ -15,6 +15,7 @@ package defaultevictor

import (
    "context"
    "fmt"
    "testing"

    v1 "k8s.io/api/core/v1"
@@ -31,14 +32,6 @@ import (
    "sigs.k8s.io/descheduler/test"
)

func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
    n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)

    nodeTaintKey := "hardware"
    nodeTaintValue := "gpu"

    nodeLabelKey := "datacenter"
    nodeLabelValue := "east"
type testCase struct {
    description string
    pods []*v1.Pod
@@ -48,9 +41,19 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
    evictSystemCriticalPods bool
    priorityThreshold *int32
    nodeFit bool
    minReplicas uint
    result bool
}

func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
    n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)

    nodeTaintKey := "hardware"
    nodeTaintValue := "gpu"

    nodeLabelKey := "datacenter"
    nodeLabelValue := "east"

    testCases := []testCase{
        {
            description: "Pod with no tolerations running on normal node, all other nodes tainted",
@@ -305,45 +308,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
            ctx, cancel := context.WithCancel(context.Background())
            defer cancel()

            var objs []runtime.Object
            for _, node := range test.nodes {
                objs = append(objs, node)
            }
            for _, pod := range test.pods {
                objs = append(objs, pod)
            }

            fakeClient := fake.NewSimpleClientset(objs...)

            sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
            podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

            getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
            if err != nil {
                t.Errorf("Build get pods assigned to node function error: %v", err)
            }

            sharedInformerFactory.Start(ctx.Done())
            sharedInformerFactory.WaitForCacheSync(ctx.Done())

            defaultEvictorArgs := &DefaultEvictorArgs{
                EvictLocalStoragePods: test.evictLocalStoragePods,
                EvictSystemCriticalPods: test.evictSystemCriticalPods,
                IgnorePvcPods: false,
                EvictFailedBarePods: test.evictFailedBarePods,
                PriorityThreshold: &api.PriorityThreshold{
                    Value: test.priorityThreshold,
                },
                NodeFit: test.nodeFit,
            }

            evictorPlugin, err := New(
                defaultEvictorArgs,
                &frameworkfake.HandleImpl{
                    ClientsetImpl: fakeClient,
                    GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
                    SharedInformerFactoryImpl: sharedInformerFactory,
                })
            evictorPlugin, err := initializePlugin(ctx, test)
            if err != nil {
                t.Fatalf("Unable to initialize the plugin: %v", err)
            }
@@ -366,19 +331,6 @@ func TestDefaultEvictorFilter(t *testing.T) {

    ownerRefUUID := uuid.NewUUID()

    type testCase struct {
        description string
        pods []*v1.Pod
        nodes []*v1.Node
        evictFailedBarePods bool
        evictLocalStoragePods bool
        evictSystemCriticalPods bool
        priorityThreshold *int32
        nodeFit bool
        minReplicas uint
        result bool
    }

    testCases := []testCase{
        {
            description: "Failed pod eviction with no ownerRefs",
@@ -757,6 +709,63 @@ func TestDefaultEvictorFilter(t *testing.T) {
            ctx, cancel := context.WithCancel(context.Background())
            defer cancel()

            evictorPlugin, err := initializePlugin(ctx, test)
            if err != nil {
                t.Fatalf("Unable to initialize the plugin: %v", err)
            }

            result := evictorPlugin.(frameworktypes.EvictorPlugin).Filter(test.pods[0])
            if (result) != test.result {
                t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
            }
        })
    }
}

func TestReinitialization(t *testing.T) {
    n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
    ownerRefUUID := uuid.NewUUID()

    testCases := []testCase{
        {
            description: "minReplicas of 2, multiple owners, eviction",
            pods: []*v1.Pod{
                test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
                    pod.ObjectMeta.OwnerReferences = append(test.GetNormalPodOwnerRefList(), test.GetNormalPodOwnerRefList()...)
                    pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
                }),
                test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
                    pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
                }),
            },
            minReplicas: 2,
            result: true,
        },
    }

    for _, test := range testCases {
        t.Run(test.description, func(t *testing.T) {
            ctx, cancel := context.WithCancel(context.Background())
            defer cancel()

            evictorPlugin, err := initializePlugin(ctx, test)
            if err != nil {
                t.Fatalf("Unable to initialize the plugin: %v", err)
            }

            defaultEvictor, ok := evictorPlugin.(*DefaultEvictor)
            if !ok {
                t.Fatalf("Unable to initialize as a DefaultEvictor plugin")
            }
            _, err = New(defaultEvictor.args, defaultEvictor.handle)
            if err != nil {
                t.Fatalf("Unable to reinitialize the plugin: %v", err)
            }
        })
    }
}

func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin, error) {
    var objs []runtime.Object
    for _, node := range test.nodes {
        objs = append(objs, node)
@@ -772,7 +781,7 @@ func TestDefaultEvictorFilter(t *testing.T) {

    getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
    if err != nil {
        t.Errorf("Build get pods assigned to node function error: %v", err)
        return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
    }

    sharedInformerFactory.Start(ctx.Done())
@@ -798,13 +807,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
        SharedInformerFactoryImpl: sharedInformerFactory,
    })
    if err != nil {
        t.Fatalf("Unable to initialize the plugin: %v", err)
        return nil, fmt.Errorf("unable to initialize the plugin: %v", err)
    }

    result := evictorPlugin.(frameworktypes.EvictorPlugin).Filter(test.pods[0])
    if (result) != test.result {
        t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
    }
    })
}
    return evictorPlugin, nil
}