Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)
Move framework types under framework/types
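The pattern repeated through every hunk below is one mechanical move: the plugin-facing types (Plugin, Handle, Status, Evictor, and the EvictorPlugin, DeschedulePlugin, and BalancePlugin extension points) now live in pkg/framework/types and are imported under the frameworktypes alias, replacing the old sigs.k8s.io/descheduler/pkg/framework import. A minimal post-move plugin skeleton, as a hedged sketch: the import path, constructor shape, and interface names are taken from this diff, while NoopDeschedule and its body are hypothetical illustration.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"

	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

// NoopDeschedule is a hypothetical plugin that exists only to show the relocated types.
type NoopDeschedule struct {
	handle frameworktypes.Handle
}

// Compile-time assertion that the plugin satisfies the moved extension-point
// interface, mirroring the `var _ frameworktypes.DeschedulePlugin = ...` lines below.
var _ frameworktypes.DeschedulePlugin = &NoopDeschedule{}

// New follows the constructor shape used by every plugin in this commit.
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
	return &NoopDeschedule{handle: handle}, nil
}

func (d *NoopDeschedule) Name() string { return "NoopDeschedule" }

// Deschedule evicts nothing; a zero-valued Status signals success, matching
// the error paths below that return &frameworktypes.Status{Err: ...}.
func (d *NoopDeschedule) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
	return &frameworktypes.Status{}
}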
@@ -26,7 +26,7 @@ import (
 	"k8s.io/klog/v2"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )

@@ -35,7 +35,7 @@ const (
 	evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
 )

-var _ framework.EvictorPlugin = &DefaultEvictor{}
+var _ frameworktypes.EvictorPlugin = &DefaultEvictor{}

 type constraint func(pod *v1.Pod) error

@@ -47,7 +47,7 @@ type constraint func(pod *v1.Pod) error
 type DefaultEvictor struct {
 	args        runtime.Object
 	constraints []constraint
-	handle      framework.Handle
+	handle      frameworktypes.Handle
 }

 // IsPodEvictableBasedOnPriority checks if the given pod is evictable based on priority resolved from pod Spec.
@@ -62,7 +62,7 @@ func HaveEvictAnnotation(pod *v1.Pod) bool {
 }

 // New builds plugin from its arguments while passing a handle
-func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	defaultEvictorArgs, ok := args.(*DefaultEvictorArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
@@ -24,8 +24,8 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	"sigs.k8s.io/descheduler/pkg/api"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/test"
 )
@@ -347,7 +347,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}

-			result := evictorPlugin.(framework.EvictorPlugin).PreEvictionFilter(test.pods[0])
+			result := evictorPlugin.(frameworktypes.EvictorPlugin).PreEvictionFilter(test.pods[0])
 			if (result) != test.result {
 				t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
 			}
@@ -755,7 +755,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}

-			result := evictorPlugin.(framework.EvictorPlugin).Filter(test.pods[0])
+			result := evictorPlugin.(frameworktypes.EvictorPlugin).Filter(test.pods[0])
 			if (result) != test.result {
 				t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
 			}
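Because New now returns the generic frameworktypes.Plugin, callers narrow it to the extension point they need before invoking it, as the test hunks above and below do with plugin.(frameworktypes.EvictorPlugin) and plugin.(frameworktypes.BalancePlugin). A hedged helper sketch of that narrowing, assuming Plugin declares Name() (the Name() methods in this diff suggest it does); the helper name and comma-ok guard are ours, since the tests here assert inline and would panic on a mismatch:

package defaultevictor

import (
	"testing"

	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

// asEvictorPlugin narrows a generic plugin to the EvictorPlugin extension
// point, failing the test with a message instead of panicking on a bare
// type assertion.
func asEvictorPlugin(t *testing.T, p frameworktypes.Plugin) frameworktypes.EvictorPlugin {
	t.Helper()
	ep, ok := p.(frameworktypes.EvictorPlugin)
	if !ok {
		t.Fatalf("plugin %q does not implement frameworktypes.EvictorPlugin", p.Name())
	}
	return ep
}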
@@ -28,7 +28,7 @@ import (
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"

 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 )

 const HighNodeUtilizationPluginName = "HighNodeUtilization"
@@ -37,15 +37,15 @@ const HighNodeUtilizationPluginName = "HighNodeUtilization"
 // Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.

 type HighNodeUtilization struct {
-	handle    framework.Handle
+	handle    frameworktypes.Handle
 	args      *HighNodeUtilizationArgs
 	podFilter func(pod *v1.Pod) bool
 }

-var _ framework.BalancePlugin = &HighNodeUtilization{}
+var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}

 // NewHighNodeUtilization builds plugin from its arguments while passing a handle
-func NewHighNodeUtilization(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	highNodeUtilizatioArgs, ok := args.(*HighNodeUtilizationArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
@@ -71,7 +71,7 @@ func (h *HighNodeUtilization) Name() string {
 }

 // Balance extension point implementation for the plugin
-func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
 	thresholds := h.args.Thresholds
 	targetThresholds := make(api.ResourceThresholds)

@@ -33,9 +33,9 @@ import (
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/test"
 )
@@ -519,7 +519,7 @@ func TestHighNodeUtilization(t *testing.T) {
 				ClientsetImpl:                 fakeClient,
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				PodEvictorImpl:                podEvictor,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 				SharedInformerFactoryImpl:     sharedInformerFactory,
 			}

@@ -530,7 +530,7 @@ func TestHighNodeUtilization(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}
-			plugin.(framework.BalancePlugin).Balance(ctx, testCase.nodes)
+			plugin.(frameworktypes.BalancePlugin).Balance(ctx, testCase.nodes)

 			podsEvicted := podEvictor.TotalEvicted()
 			if testCase.expectedPodsEvicted != podsEvicted {
@@ -670,7 +670,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 				ClientsetImpl:                 fakeClient,
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				PodEvictorImpl:                podEvictor,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 				SharedInformerFactoryImpl:     sharedInformerFactory,
 			}

@@ -683,7 +683,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}
-			plugin.(framework.BalancePlugin).Balance(ctx, item.nodes)
+			plugin.(frameworktypes.BalancePlugin).Balance(ctx, item.nodes)

 			if item.evictionsExpected != podEvictor.TotalEvicted() {
 				t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
@@ -26,7 +26,7 @@ import (
 	"k8s.io/klog/v2"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 )

 const LowNodeUtilizationPluginName = "LowNodeUtilization"
@@ -35,15 +35,15 @@ const LowNodeUtilizationPluginName = "LowNodeUtilization"
 // to calculate nodes' utilization and not the actual resource usage.

 type LowNodeUtilization struct {
-	handle    framework.Handle
+	handle    frameworktypes.Handle
 	args      *LowNodeUtilizationArgs
 	podFilter func(pod *v1.Pod) bool
 }

-var _ framework.BalancePlugin = &LowNodeUtilization{}
+var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}

 // NewLowNodeUtilization builds plugin from its arguments while passing a handle
-func NewLowNodeUtilization(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	lowNodeUtilizationArgsArgs, ok := args.(*LowNodeUtilizationArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
@@ -69,7 +69,7 @@ func (l *LowNodeUtilization) Name() string {
 }

 // Balance extension point implementation for the plugin
-func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
 	useDeviationThresholds := l.args.UseDeviationThresholds
 	thresholds := l.args.Thresholds
 	targetThresholds := l.args.TargetThresholds
@@ -22,9 +22,9 @@ import (
 	"testing"

 	"sigs.k8s.io/descheduler/pkg/api"
-	"sigs.k8s.io/descheduler/pkg/framework"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"

 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1"
@@ -921,7 +921,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				ClientsetImpl:                 fakeClient,
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				PodEvictorImpl:                podEvictor,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 				SharedInformerFactoryImpl:     sharedInformerFactory,
 			}

@@ -935,7 +935,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}
-			plugin.(framework.BalancePlugin).Balance(ctx, test.nodes)
+			plugin.(frameworktypes.BalancePlugin).Balance(ctx, test.nodes)

 			podsEvicted := podEvictor.TotalEvicted()
 			if test.expectedPodsEvicted != podsEvicted {
@@ -1093,7 +1093,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 				ClientsetImpl:                 fakeClient,
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				PodEvictorImpl:                podEvictor,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 				SharedInformerFactoryImpl:     sharedInformerFactory,
 			}

@@ -1109,7 +1109,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}
-			plugin.(framework.BalancePlugin).Balance(ctx, item.nodes)
+			plugin.(frameworktypes.BalancePlugin).Balance(ctx, item.nodes)

 			if item.evictionsExpected != podEvictor.TotalEvicted() {
 				t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
@@ -30,7 +30,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/node"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )

@@ -215,7 +215,7 @@ func evictPodsFromSourceNodes(
 	ctx context.Context,
 	evictableNamespaces *api.Namespaces,
 	sourceNodes, destinationNodes []NodeInfo,
-	podEvictor framework.Evictor,
+	podEvictor frameworktypes.Evictor,
 	podFilter func(pod *v1.Pod) bool,
 	resourceNames []v1.ResourceName,
 	continueEviction continueEvictionCond,
@@ -279,7 +279,7 @@ func evictPods(
 	nodeInfo NodeInfo,
 	totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
 	taintsOfLowNodes map[string][]v1.Taint,
-	podEvictor framework.Evictor,
+	podEvictor frameworktypes.Evictor,
 	continueEviction continueEvictionCond,
 ) {
 	var excludedNamespaces sets.String
@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/klog/v2"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"

 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -33,17 +33,17 @@ import (

 const PluginName = "PodLifeTime"

-var _ framework.DeschedulePlugin = &PodLifeTime{}
+var _ frameworktypes.DeschedulePlugin = &PodLifeTime{}

 // PodLifeTime evicts pods on the node that violate the max pod lifetime threshold
 type PodLifeTime struct {
-	handle    framework.Handle
+	handle    frameworktypes.Handle
 	args      *PodLifeTimeArgs
 	podFilter podutil.FilterFunc
 }

 // New builds plugin from its arguments while passing a handle
-func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	podLifeTimeArgs, ok := args.(*PodLifeTimeArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type PodLifeTimeArgs, got %T", args)
@@ -101,7 +101,7 @@ func (d *PodLifeTime) Name() string {
 }

 // Deschedule extension point implementation for the plugin
-func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
 	podsToEvict := make([]*v1.Pod, 0)
 	nodeMap := make(map[string]*v1.Node, len(nodes))

@@ -110,7 +110,7 @@ func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framewo
 		pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
 		if err != nil {
 			// no pods evicted as error encountered retrieving evictable Pods
-			return &framework.Status{
+			return &frameworktypes.Status{
 				Err: fmt.Errorf("error listing pods on a node: %v", err),
 			}
 		}
@@ -30,9 +30,9 @@ import (
 	"k8s.io/client-go/tools/events"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/test"
 )

@@ -378,14 +378,14 @@ func TestPodLifeTime(t *testing.T) {
 			plugin, err := New(tc.args, &frameworkfake.HandleImpl{
 				ClientsetImpl:                 fakeClient,
 				PodEvictorImpl:                podEvictor,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 			})
 			if err != nil {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}

-			plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
+			plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != tc.expectedEvictedPodCount {
 				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
@@ -34,7 +34,7 @@ import (

 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 )

 const PluginName = "RemoveDuplicates"
@@ -45,12 +45,12 @@ const PluginName = "RemoveDuplicates"
 // As of now, this plugin won't evict daemonsets, mirror pods, critical pods and pods with local storages.

 type RemoveDuplicates struct {
-	handle    framework.Handle
+	handle    frameworktypes.Handle
 	args      *RemoveDuplicatesArgs
 	podFilter podutil.FilterFunc
 }

-var _ framework.BalancePlugin = &RemoveDuplicates{}
+var _ frameworktypes.BalancePlugin = &RemoveDuplicates{}

 type podOwner struct {
 	namespace, kind, name string
@@ -58,7 +58,7 @@ type podOwner struct {
 }

 // New builds plugin from its arguments while passing a handle
-func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	removeDuplicatesArgs, ok := args.(*RemoveDuplicatesArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type RemoveDuplicatesArgs, got %T", args)
@@ -93,7 +93,7 @@ func (r *RemoveDuplicates) Name() string {
 }

 // Balance extension point implementation for the plugin
-func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
 	duplicatePods := make(map[podOwner]map[string][]*v1.Pod)
 	ownerKeyOccurence := make(map[podOwner]int32)
 	nodeCount := 0
@@ -21,9 +21,9 @@ import (
 	"testing"

 	"k8s.io/client-go/tools/events"
-	"sigs.k8s.io/descheduler/pkg/framework"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"

 	v1 "k8s.io/api/core/v1"
 	policyv1 "k8s.io/api/policy/v1"
@@ -347,7 +347,7 @@ func TestFindDuplicatePods(t *testing.T) {
 				ClientsetImpl:                 fakeClient,
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				PodEvictorImpl:                podEvictor,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 				SharedInformerFactoryImpl:     sharedInformerFactory,
 			}

@@ -360,7 +360,7 @@ func TestFindDuplicatePods(t *testing.T) {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}

-			plugin.(framework.BalancePlugin).Balance(ctx, testCase.nodes)
+			plugin.(frameworktypes.BalancePlugin).Balance(ctx, testCase.nodes)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != testCase.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", testCase.description, actualEvictedPodCount, testCase.expectedEvictedPodCount)
@@ -796,7 +796,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				ClientsetImpl:                 fakeClient,
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				PodEvictorImpl:                podEvictor,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 				SharedInformerFactoryImpl:     sharedInformerFactory,
 			}

@@ -807,7 +807,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}

-			plugin.(framework.BalancePlugin).Balance(ctx, testCase.nodes)
+			plugin.(frameworktypes.BalancePlugin).Balance(ctx, testCase.nodes)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != testCase.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", testCase.description, actualEvictedPodCount, testCase.expectedEvictedPodCount)
@@ -29,22 +29,22 @@ import (

 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 )

 const PluginName = "RemoveFailedPods"

 // RemoveFailedPods evicts pods in failed status phase that match the given args criteria
 type RemoveFailedPods struct {
-	handle    framework.Handle
+	handle    frameworktypes.Handle
 	args      *RemoveFailedPodsArgs
 	podFilter podutil.FilterFunc
 }

-var _ framework.DeschedulePlugin = &RemoveFailedPods{}
+var _ frameworktypes.DeschedulePlugin = &RemoveFailedPods{}

 // New builds plugin from its arguments while passing a handle
-func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	failedPodsArgs, ok := args.(*RemoveFailedPodsArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type RemoveFailedPodsArgs, got %T", args)
@@ -91,13 +91,13 @@ func (d *RemoveFailedPods) Name() string {
 }

 // Deschedule extension point implementation for the plugin
-func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 		pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
 		if err != nil {
 			// no pods evicted as error encountered retrieving evictable Pods
-			return &framework.Status{
+			return &frameworktypes.Status{
 				Err: fmt.Errorf("error listing pods on a node: %v", err),
 			}
 		}
@@ -27,7 +27,7 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/events"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"

 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -331,7 +331,7 @@ func TestRemoveFailedPods(t *testing.T) {
 				&frameworkfake.HandleImpl{
 					ClientsetImpl:                 fakeClient,
 					PodEvictorImpl:                podEvictor,
-					EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+					EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 					SharedInformerFactoryImpl:     sharedInformerFactory,
 					GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				},
@@ -340,7 +340,7 @@ func TestRemoveFailedPods(t *testing.T) {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}

-			plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
+			plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != tc.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -27,7 +27,7 @@ import (

 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 )

 const PluginName = "RemovePodsHavingTooManyRestarts"
@@ -36,15 +36,15 @@ const PluginName = "RemovePodsHavingTooManyRestarts"
 // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
 type RemovePodsHavingTooManyRestarts struct {
-	handle    framework.Handle
+	handle    frameworktypes.Handle
 	args      *RemovePodsHavingTooManyRestartsArgs
 	podFilter podutil.FilterFunc
 }

-var _ framework.DeschedulePlugin = &RemovePodsHavingTooManyRestarts{}
+var _ frameworktypes.DeschedulePlugin = &RemovePodsHavingTooManyRestarts{}

 // New builds plugin from its arguments while passing a handle
-func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	tooManyRestartsArgs, ok := args.(*RemovePodsHavingTooManyRestartsArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type RemovePodsHavingTooManyRestartsArgs, got %T", args)
@@ -88,13 +88,13 @@ func (d *RemovePodsHavingTooManyRestarts) Name() string {
 }

 // Deschedule extension point implementation for the plugin
-func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 		pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
 		if err != nil {
 			// no pods evicted as error encountered retrieving evictable Pods
-			return &framework.Status{
+			return &frameworktypes.Status{
 				Err: fmt.Errorf("error listing pods on a node: %v", err),
 			}
 		}
@@ -31,9 +31,9 @@ import (

 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/test"
 )

@@ -287,7 +287,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 				&frameworkfake.HandleImpl{
 					ClientsetImpl:                 fakeClient,
 					PodEvictorImpl:                podEvictor,
-					EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+					EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 					SharedInformerFactoryImpl:     sharedInformerFactory,
 					GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				})
@@ -295,7 +295,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}

-			plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
+			plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != tc.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -24,7 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"

 	v1 "k8s.io/api/core/v1"
@@ -36,15 +36,15 @@ const PluginName = "RemovePodsViolatingInterPodAntiAffinity"

 // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which violate inter pod anti affinity
 type RemovePodsViolatingInterPodAntiAffinity struct {
-	handle    framework.Handle
+	handle    frameworktypes.Handle
 	args      *RemovePodsViolatingInterPodAntiAffinityArgs
 	podFilter podutil.FilterFunc
 }

-var _ framework.DeschedulePlugin = &RemovePodsViolatingInterPodAntiAffinity{}
+var _ frameworktypes.DeschedulePlugin = &RemovePodsViolatingInterPodAntiAffinity{}

 // New builds plugin from its arguments while passing a handle
-func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	interPodAntiAffinityArgs, ok := args.(*RemovePodsViolatingInterPodAntiAffinityArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type RemovePodsViolatingInterPodAntiAffinityArgs, got %T", args)
@@ -77,13 +77,13 @@ func (d *RemovePodsViolatingInterPodAntiAffinity) Name() string {
 	return PluginName
 }

-func (d *RemovePodsViolatingInterPodAntiAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (d *RemovePodsViolatingInterPodAntiAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
 loop:
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 		pods, err := podutil.ListPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
 		if err != nil {
-			return &framework.Status{
+			return &frameworktypes.Status{
 				Err: fmt.Errorf("error listing pods: %v", err),
 			}
 		}
@@ -27,7 +27,7 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/events"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"

 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -251,14 +251,14 @@ func TestPodAntiAffinity(t *testing.T) {
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				PodEvictorImpl:                podEvictor,
 				SharedInformerFactoryImpl:     sharedInformerFactory,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 			}
 			plugin, err := New(
 				&RemovePodsViolatingInterPodAntiAffinityArgs{},
 				handle,
 			)

-			plugin.(framework.DeschedulePlugin).Deschedule(ctx, test.nodes)
+			plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, test.nodes)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != test.expectedEvictedPodCount {
 				t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
@@ -25,22 +25,22 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 )

 const PluginName = "RemovePodsViolatingNodeAffinity"

 // RemovePodsViolatingNodeAffinity evicts pods on the node which violate node affinity
 type RemovePodsViolatingNodeAffinity struct {
-	handle    framework.Handle
+	handle    frameworktypes.Handle
 	args      *RemovePodsViolatingNodeAffinityArgs
 	podFilter podutil.FilterFunc
 }

-var _ framework.DeschedulePlugin = &RemovePodsViolatingNodeAffinity{}
+var _ frameworktypes.DeschedulePlugin = &RemovePodsViolatingNodeAffinity{}

 // New builds plugin from its arguments while passing a handle
-func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	nodeAffinityArgs, ok := args.(*RemovePodsViolatingNodeAffinityArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeAffinityArgs, got %T", args)
@@ -75,7 +75,7 @@ func (d *RemovePodsViolatingNodeAffinity) Name() string {
 	return PluginName
 }

-func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
 	for _, nodeAffinity := range d.args.NodeAffinityType {
 		klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)

@@ -94,7 +94,7 @@ func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes
 			}),
 		)
 		if err != nil {
-			return &framework.Status{
+			return &frameworktypes.Status{
 				Err: fmt.Errorf("error listing pods on a node: %v", err),
 			}
 		}
@@ -27,7 +27,7 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/events"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"

 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -251,7 +251,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				PodEvictorImpl:                podEvictor,
 				SharedInformerFactoryImpl:     sharedInformerFactory,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 			}

 			plugin, err := New(
@@ -264,7 +264,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}

-			plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
+			plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != tc.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -27,7 +27,7 @@ import (

 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )

@@ -35,16 +35,16 @@ const PluginName = "RemovePodsViolatingNodeTaints"

 // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
 type RemovePodsViolatingNodeTaints struct {
-	handle         framework.Handle
+	handle         frameworktypes.Handle
 	args           *RemovePodsViolatingNodeTaintsArgs
 	taintFilterFnc func(taint *v1.Taint) bool
 	podFilter      podutil.FilterFunc
 }

-var _ framework.DeschedulePlugin = &RemovePodsViolatingNodeTaints{}
+var _ frameworktypes.DeschedulePlugin = &RemovePodsViolatingNodeTaints{}

 // New builds plugin from its arguments while passing a handle
-func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	nodeTaintsArgs, ok := args.(*RemovePodsViolatingNodeTaintsArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeTaintsArgs, got %T", args)
@@ -94,13 +94,13 @@ func (d *RemovePodsViolatingNodeTaints) Name() string {
 }

 // Deschedule extension point implementation for the plugin
-func (d *RemovePodsViolatingNodeTaints) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (d *RemovePodsViolatingNodeTaints) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
 	for _, node := range nodes {
 		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 		pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
 		if err != nil {
 			// no pods evicted as error encountered retrieving evictable Pods
-			return &framework.Status{
+			return &frameworktypes.Status{
 				Err: fmt.Errorf("error listing pods on a node: %v", err),
 			}
 		}
@@ -31,9 +31,9 @@ import (

 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/test"
 )
@@ -386,7 +386,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 				ClientsetImpl:                 fakeClient,
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				PodEvictorImpl:                podEvictor,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 				SharedInformerFactoryImpl:     sharedInformerFactory,
 			}

@@ -400,7 +400,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}

-			plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
+			plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != tc.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
@@ -30,7 +30,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	"sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )

@@ -49,15 +49,15 @@ type topology struct {

 // RemovePodsViolatingTopologySpreadConstraint evicts pods which violate their topology spread constraints
 type RemovePodsViolatingTopologySpreadConstraint struct {
-	handle    framework.Handle
+	handle    frameworktypes.Handle
 	args      *RemovePodsViolatingTopologySpreadConstraintArgs
 	podFilter podutil.FilterFunc
 }

-var _ framework.BalancePlugin = &RemovePodsViolatingTopologySpreadConstraint{}
+var _ frameworktypes.BalancePlugin = &RemovePodsViolatingTopologySpreadConstraint{}

 // New builds plugin from its arguments while passing a handle
-func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
 	pluginArgs, ok := args.(*RemovePodsViolatingTopologySpreadConstraintArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type RemovePodsViolatingTopologySpreadConstraintArgs, got %T", args)
@@ -84,7 +84,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Name() string {
 }

 // nolint: gocyclo
-func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
 	nodeMap := make(map[string]*v1.Node, len(nodes))
 	for _, node := range nodes {
 		nodeMap[node.Name] = node
@@ -107,7 +107,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
 	namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		klog.ErrorS(err, "Couldn't list namespaces")
-		return &framework.Status{
+		return &frameworktypes.Status{
 			Err: fmt.Errorf("list namespace: %w", err),
 		}
 	}
@@ -18,9 +18,9 @@ import (
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/framework"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/test"
 )

@@ -1196,7 +1196,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				ClientsetImpl:                 fakeClient,
 				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 				PodEvictorImpl:                podEvictor,
-				EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+				EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
 				SharedInformerFactoryImpl:     sharedInformerFactory,
 			}

@@ -1208,7 +1208,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				t.Fatalf("Unable to initialize the plugin: %v", err)
 			}

-			plugin.(framework.BalancePlugin).Balance(ctx, tc.nodes)
+			plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != tc.expectedEvictedCount {
 				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)