Mirror of https://github.com/kubernetes-sigs/descheduler.git
Synced 2026-01-26 05:14:13 +01:00
Merge pull request #1097 from ingvagabund/list-of-extension-points
Populate extension points automatically for deschedule, balance, filter and preEvictionFilter
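Context for the diff below: instead of relying on hand-maintained lists, the conversion code now derives a profile's extension points (deschedule, balance, filter, preEvictionFilter) from the interfaces the registered plugin type implements, which is why Register gains a pluginType argument. What follows is a minimal, self-contained Go sketch of that idea; the interface and type names here are simplified stand-ins, not the descheduler's actual API.

package main

import "fmt"

// Simplified stand-ins for the descheduler framework interfaces (illustrative only).
type Plugin interface{ Name() string }

type DeschedulePlugin interface {
	Plugin
	Deschedule()
}

type BalancePlugin interface {
	Plugin
	Balance()
}

// extensionPointsFor reports which extension points a plugin instance implements,
// using the same kind of type assertions the conversion code relies on.
func extensionPointsFor(p Plugin) []string {
	var points []string
	if _, ok := p.(DeschedulePlugin); ok {
		points = append(points, "deschedule")
	}
	if _, ok := p.(BalancePlugin); ok {
		points = append(points, "balance")
	}
	return points
}

// fakeBalance implements only the balance extension point.
type fakeBalance struct{}

func (fakeBalance) Name() string { return "FakeBalance" }
func (fakeBalance) Balance()     {}

func main() {
	// With an example instance available, the extension points can be derived automatically.
	fmt.Println(extensionPointsFor(fakeBalance{})) // [balance]
}

In the real change, checkBalance and checkDeschedule below perform the equivalent type assertions against frameworktypes.BalancePlugin and frameworktypes.DeschedulePlugin.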
@@ -30,9 +30,9 @@ import (
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

var (
@@ -48,10 +48,10 @@ var (
// can evict a pod without importing a specific pod evictor
type evictorImpl struct {
podEvictor *evictions.PodEvictor
evictorFilter framework.EvictorPlugin
evictorFilter frameworktypes.EvictorPlugin
}

var _ framework.Evictor = &evictorImpl{}
var _ frameworktypes.Evictor = &evictorImpl{}

// Filter checks if a pod can be evicted
func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
@@ -80,7 +80,7 @@ type handleImpl struct {
evictor *evictorImpl
}

var _ framework.Handle = &handleImpl{}
var _ frameworktypes.Handle = &handleImpl{}

// ClientSet retrieves kube client set
func (hi *handleImpl) ClientSet() clientset.Interface {
@@ -98,7 +98,7 @@ func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
}

// Evictor retrieves evictor so plugins can filter and evict pods
func (hi *handleImpl) Evictor() framework.Evictor {
func (hi *handleImpl) Evictor() frameworktypes.Evictor {
return hi.evictor
}

@@ -233,14 +233,14 @@ func V1alpha1ToInternal(
return nil
}

func enableProfilePluginsByType(profilePlugins api.Plugins, pluginInstance framework.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
func enableProfilePluginsByType(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
profilePlugins = checkBalance(profilePlugins, pluginInstance, pluginConfig)
profilePlugins = checkDeschedule(profilePlugins, pluginInstance, pluginConfig)
return profilePlugins
}

func checkBalance(profilePlugins api.Plugins, pluginInstance framework.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
_, ok := pluginInstance.(framework.BalancePlugin)
func checkBalance(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
_, ok := pluginInstance.(frameworktypes.BalancePlugin)
if ok {
klog.V(3).Infof("converting Balance plugin: %s", pluginInstance.Name())
profilePlugins.Balance.Enabled = []string{pluginConfig.Name}
@@ -248,8 +248,8 @@ func checkBalance(profilePlugins api.Plugins, pluginInstance framework.Plugin, p
return profilePlugins
}

func checkDeschedule(profilePlugins api.Plugins, pluginInstance framework.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
_, ok := pluginInstance.(framework.DeschedulePlugin)
func checkDeschedule(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
_, ok := pluginInstance.(frameworktypes.DeschedulePlugin)
if ok {
klog.V(3).Infof("converting Deschedule plugin: %s", pluginInstance.Name())
profilePlugins.Deschedule.Enabled = []string{pluginConfig.Name}

@@ -41,8 +41,8 @@ func (s scope) Meta() *conversion.Meta {

func TestTaintsUpdated(t *testing.T) {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)

ctx := context.Background()
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
@@ -112,8 +112,8 @@ func TestTaintsUpdated(t *testing.T) {

func TestDuplicate(t *testing.T) {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)

ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)

@@ -36,15 +36,15 @@ func SetupPlugins() {
|
||||
}
|
||||
|
||||
func RegisterDefaultPlugins(registry pluginregistry.Registry) {
|
||||
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, registry)
|
||||
pluginregistry.Register(nodeutilization.LowNodeUtilizationPluginName, nodeutilization.NewLowNodeUtilization, &nodeutilization.LowNodeUtilizationArgs{}, nodeutilization.ValidateLowNodeUtilizationArgs, nodeutilization.SetDefaults_LowNodeUtilizationArgs, registry)
|
||||
pluginregistry.Register(nodeutilization.HighNodeUtilizationPluginName, nodeutilization.NewHighNodeUtilization, &nodeutilization.HighNodeUtilizationArgs{}, nodeutilization.ValidateHighNodeUtilizationArgs, nodeutilization.SetDefaults_HighNodeUtilizationArgs, registry)
|
||||
pluginregistry.Register(podlifetime.PluginName, podlifetime.New, &podlifetime.PodLifeTimeArgs{}, podlifetime.ValidatePodLifeTimeArgs, podlifetime.SetDefaults_PodLifeTimeArgs, registry)
|
||||
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, registry)
|
||||
pluginregistry.Register(removefailedpods.PluginName, removefailedpods.New, &removefailedpods.RemoveFailedPodsArgs{}, removefailedpods.ValidateRemoveFailedPodsArgs, removefailedpods.SetDefaults_RemoveFailedPodsArgs, registry)
|
||||
pluginregistry.Register(removepodshavingtoomanyrestarts.PluginName, removepodshavingtoomanyrestarts.New, &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{}, removepodshavingtoomanyrestarts.ValidateRemovePodsHavingTooManyRestartsArgs, removepodshavingtoomanyrestarts.SetDefaults_RemovePodsHavingTooManyRestartsArgs, registry)
|
||||
pluginregistry.Register(removepodsviolatinginterpodantiaffinity.PluginName, removepodsviolatinginterpodantiaffinity.New, &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{}, removepodsviolatinginterpodantiaffinity.ValidateRemovePodsViolatingInterPodAntiAffinityArgs, removepodsviolatinginterpodantiaffinity.SetDefaults_RemovePodsViolatingInterPodAntiAffinityArgs, registry)
|
||||
pluginregistry.Register(removepodsviolatingnodeaffinity.PluginName, removepodsviolatingnodeaffinity.New, &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{}, removepodsviolatingnodeaffinity.ValidateRemovePodsViolatingNodeAffinityArgs, removepodsviolatingnodeaffinity.SetDefaults_RemovePodsViolatingNodeAffinityArgs, registry)
|
||||
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, registry)
|
||||
pluginregistry.Register(removepodsviolatingtopologyspreadconstraint.PluginName, removepodsviolatingtopologyspreadconstraint.New, &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{}, removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs, removepodsviolatingtopologyspreadconstraint.SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs, registry)
|
||||
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, registry)
|
||||
pluginregistry.Register(nodeutilization.LowNodeUtilizationPluginName, nodeutilization.NewLowNodeUtilization, &nodeutilization.LowNodeUtilization{}, &nodeutilization.LowNodeUtilizationArgs{}, nodeutilization.ValidateLowNodeUtilizationArgs, nodeutilization.SetDefaults_LowNodeUtilizationArgs, registry)
|
||||
pluginregistry.Register(nodeutilization.HighNodeUtilizationPluginName, nodeutilization.NewHighNodeUtilization, &nodeutilization.HighNodeUtilization{}, &nodeutilization.HighNodeUtilizationArgs{}, nodeutilization.ValidateHighNodeUtilizationArgs, nodeutilization.SetDefaults_HighNodeUtilizationArgs, registry)
|
||||
pluginregistry.Register(podlifetime.PluginName, podlifetime.New, &podlifetime.PodLifeTime{}, &podlifetime.PodLifeTimeArgs{}, podlifetime.ValidatePodLifeTimeArgs, podlifetime.SetDefaults_PodLifeTimeArgs, registry)
|
||||
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, registry)
|
||||
pluginregistry.Register(removefailedpods.PluginName, removefailedpods.New, &removefailedpods.RemoveFailedPods{}, &removefailedpods.RemoveFailedPodsArgs{}, removefailedpods.ValidateRemoveFailedPodsArgs, removefailedpods.SetDefaults_RemoveFailedPodsArgs, registry)
|
||||
pluginregistry.Register(removepodshavingtoomanyrestarts.PluginName, removepodshavingtoomanyrestarts.New, &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestarts{}, &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{}, removepodshavingtoomanyrestarts.ValidateRemovePodsHavingTooManyRestartsArgs, removepodshavingtoomanyrestarts.SetDefaults_RemovePodsHavingTooManyRestartsArgs, registry)
|
||||
pluginregistry.Register(removepodsviolatinginterpodantiaffinity.PluginName, removepodsviolatinginterpodantiaffinity.New, &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinity{}, &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{}, removepodsviolatinginterpodantiaffinity.ValidateRemovePodsViolatingInterPodAntiAffinityArgs, removepodsviolatinginterpodantiaffinity.SetDefaults_RemovePodsViolatingInterPodAntiAffinityArgs, registry)
|
||||
pluginregistry.Register(removepodsviolatingnodeaffinity.PluginName, removepodsviolatingnodeaffinity.New, &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinity{}, &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{}, removepodsviolatingnodeaffinity.ValidateRemovePodsViolatingNodeAffinityArgs, removepodsviolatingnodeaffinity.SetDefaults_RemovePodsViolatingNodeAffinityArgs, registry)
|
||||
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, registry)
|
||||
pluginregistry.Register(removepodsviolatingtopologyspreadconstraint.PluginName, removepodsviolatingtopologyspreadconstraint.New, &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraint{}, &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{}, removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs, removepodsviolatingtopologyspreadconstraint.SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs, registry)
|
||||
}
|
||||
|
||||
@@ -9,18 +9,18 @@ import (

"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

type HandleImpl struct {
ClientsetImpl clientset.Interface
GetPodsAssignedToNodeFuncImpl podutil.GetPodsAssignedToNodeFunc
SharedInformerFactoryImpl informers.SharedInformerFactory
EvictorFilterImpl framework.EvictorPlugin
EvictorFilterImpl frameworktypes.EvictorPlugin
PodEvictorImpl *evictions.PodEvictor
}

var _ framework.Handle = &HandleImpl{}
var _ frameworktypes.Handle = &HandleImpl{}

func (hi *HandleImpl) ClientSet() clientset.Interface {
return hi.ClientsetImpl
@@ -34,7 +34,7 @@ func (hi *HandleImpl) SharedInformerFactory() informers.SharedInformerFactory {
return hi.SharedInformerFactoryImpl
}

func (hi *HandleImpl) Evictor() framework.Evictor {
func (hi *HandleImpl) Evictor() frameworktypes.Evictor {
return hi
}

@@ -20,8 +20,8 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
@@ -39,9 +39,12 @@ func ValidateFakePluginArgs(obj runtime.Object) error {
|
||||
func SetDefaults_FakePluginArgs(obj runtime.Object) {}
|
||||
|
||||
var (
|
||||
_ framework.EvictorPlugin = &FakePlugin{}
|
||||
_ framework.DeschedulePlugin = &FakePlugin{}
|
||||
_ framework.BalancePlugin = &FakePlugin{}
|
||||
_ frameworktypes.EvictorPlugin = &FakePlugin{}
|
||||
_ frameworktypes.DeschedulePlugin = &FakePlugin{}
|
||||
_ frameworktypes.BalancePlugin = &FakePlugin{}
|
||||
_ frameworktypes.EvictorPlugin = &FakeFilterPlugin{}
|
||||
_ frameworktypes.DeschedulePlugin = &FakeDeschedulePlugin{}
|
||||
_ frameworktypes.BalancePlugin = &FakeBalancePlugin{}
|
||||
)
|
||||
|
||||
// FakePlugin is a configurable plugin used for testing
|
||||
@@ -53,11 +56,11 @@ type FakePlugin struct {
|
||||
ReactionChain []Reactor
|
||||
|
||||
args runtime.Object
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
}
|
||||
|
||||
func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
@@ -71,7 +74,7 @@ func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
@@ -84,6 +87,10 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error)
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
func (c *FakePlugin) AddReactor(extensionPoint string, reaction ReactionFunc) {
|
||||
c.ReactionChain = append(c.ReactionChain, &SimpleReactor{ExtensionPoint: extensionPoint, Reaction: reaction})
|
||||
}
|
||||
|
||||
// Name retrieves the plugin name
|
||||
func (d *FakePlugin) Name() string {
|
||||
return d.PluginName
|
||||
@@ -97,42 +104,307 @@ func (d *FakePlugin) Filter(pod *v1.Pod) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *FakePlugin) handleAction(action Action) *framework.Status {
|
||||
func (d *FakePlugin) handleAction(action Action) *frameworktypes.Status {
|
||||
actionCopy := action.DeepCopy()
|
||||
for _, reactor := range d.ReactionChain {
|
||||
if !reactor.Handles(actionCopy) {
|
||||
continue
|
||||
}
|
||||
handled, err := reactor.React(actionCopy)
|
||||
handled, _, err := reactor.React(actionCopy)
|
||||
if !handled {
|
||||
continue
|
||||
}
|
||||
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("unhandled %q action", action.GetExtensionPoint()),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *FakePlugin) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d *FakePlugin) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
return d.handleAction(&DescheduleActionImpl{
|
||||
ActionImpl: ActionImpl{
|
||||
handle: d.handle,
|
||||
extensionPoint: string(framework.DescheduleExtensionPoint),
|
||||
extensionPoint: string(frameworktypes.DescheduleExtensionPoint),
|
||||
},
|
||||
nodes: nodes,
|
||||
})
|
||||
}
|
||||
|
||||
func (d *FakePlugin) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d *FakePlugin) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
return d.handleAction(&BalanceActionImpl{
|
||||
ActionImpl: ActionImpl{
|
||||
handle: d.handle,
|
||||
extensionPoint: string(framework.BalanceExtensionPoint),
|
||||
extensionPoint: string(frameworktypes.BalanceExtensionPoint),
|
||||
},
|
||||
nodes: nodes,
|
||||
})
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// FakeDeschedulePluginArgs holds arguments used to configure FakeDeschedulePlugin plugin.
|
||||
type FakeDeschedulePluginArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
}
|
||||
|
||||
// FakeDeschedulePlugin is a configurable plugin used for testing
|
||||
type FakeDeschedulePlugin struct {
|
||||
PluginName string
|
||||
|
||||
// ReactionChain is the list of reactors that will be attempted for every
|
||||
// request in the order they are tried.
|
||||
ReactionChain []Reactor
|
||||
|
||||
args runtime.Object
|
||||
handle frameworktypes.Handle
|
||||
}
|
||||
|
||||
func NewFakeDeschedulePluginFncFromFake(fp *FakeDeschedulePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeDeschedulePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeDeschedulePluginArgs, got %T", args)
|
||||
}
|
||||
|
||||
fp.handle = handle
|
||||
fp.args = fakePluginArgs
|
||||
|
||||
return fp, nil
|
||||
}
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func NewFakeDeschedule(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeDeschedulePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
}
|
||||
|
||||
ev := &FakeDeschedulePlugin{}
|
||||
ev.handle = handle
|
||||
ev.args = fakePluginArgs
|
||||
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
func (c *FakeDeschedulePlugin) AddReactor(extensionPoint string, reaction ReactionFunc) {
|
||||
c.ReactionChain = append(c.ReactionChain, &SimpleReactor{ExtensionPoint: extensionPoint, Reaction: reaction})
|
||||
}
|
||||
|
||||
// Name retrieves the plugin name
|
||||
func (d *FakeDeschedulePlugin) Name() string {
|
||||
return d.PluginName
|
||||
}
|
||||
|
||||
func (d *FakeDeschedulePlugin) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
return d.handleAction(&DescheduleActionImpl{
|
||||
ActionImpl: ActionImpl{
|
||||
handle: d.handle,
|
||||
extensionPoint: string(frameworktypes.DescheduleExtensionPoint),
|
||||
},
|
||||
nodes: nodes,
|
||||
})
|
||||
}
|
||||
|
||||
func (d *FakeDeschedulePlugin) handleAction(action Action) *frameworktypes.Status {
|
||||
actionCopy := action.DeepCopy()
|
||||
for _, reactor := range d.ReactionChain {
|
||||
if !reactor.Handles(actionCopy) {
|
||||
continue
|
||||
}
|
||||
handled, _, err := reactor.React(actionCopy)
|
||||
if !handled {
|
||||
continue
|
||||
}
|
||||
|
||||
return &frameworktypes.Status{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("unhandled %q action", action.GetExtensionPoint()),
|
||||
}
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// FakeBalancePluginArgs holds arguments used to configure FakeBalancePlugin plugin.
|
||||
type FakeBalancePluginArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
}
|
||||
|
||||
// FakeBalancePlugin is a configurable plugin used for testing
|
||||
type FakeBalancePlugin struct {
|
||||
PluginName string
|
||||
|
||||
// ReactionChain is the list of reactors that will be attempted for every
|
||||
// request in the order they are tried.
|
||||
ReactionChain []Reactor
|
||||
|
||||
args runtime.Object
|
||||
handle frameworktypes.Handle
|
||||
}
|
||||
|
||||
func NewFakeBalancePluginFncFromFake(fp *FakeBalancePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeBalancePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeBalancePluginArgs, got %T", args)
|
||||
}
|
||||
|
||||
fp.handle = handle
|
||||
fp.args = fakePluginArgs
|
||||
|
||||
return fp, nil
|
||||
}
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func NewFakeBalance(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeBalancePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
}
|
||||
|
||||
ev := &FakeBalancePlugin{}
|
||||
ev.handle = handle
|
||||
ev.args = fakePluginArgs
|
||||
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
func (c *FakeBalancePlugin) AddReactor(extensionPoint string, reaction ReactionFunc) {
|
||||
c.ReactionChain = append(c.ReactionChain, &SimpleReactor{ExtensionPoint: extensionPoint, Reaction: reaction})
|
||||
}
|
||||
|
||||
// Name retrieves the plugin name
|
||||
func (d *FakeBalancePlugin) Name() string {
|
||||
return d.PluginName
|
||||
}
|
||||
|
||||
func (d *FakeBalancePlugin) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
return d.handleAction(&BalanceActionImpl{
|
||||
ActionImpl: ActionImpl{
|
||||
handle: d.handle,
|
||||
extensionPoint: string(frameworktypes.BalanceExtensionPoint),
|
||||
},
|
||||
nodes: nodes,
|
||||
})
|
||||
}
|
||||
|
||||
func (d *FakeBalancePlugin) handleAction(action Action) *frameworktypes.Status {
|
||||
actionCopy := action.DeepCopy()
|
||||
for _, reactor := range d.ReactionChain {
|
||||
if !reactor.Handles(actionCopy) {
|
||||
continue
|
||||
}
|
||||
handled, _, err := reactor.React(actionCopy)
|
||||
if !handled {
|
||||
continue
|
||||
}
|
||||
|
||||
return &frameworktypes.Status{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("unhandled %q action", action.GetExtensionPoint()),
|
||||
}
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// FakeFilterPluginArgs holds arguments used to configure FakeFilterPlugin plugin.
|
||||
type FakeFilterPluginArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
}
|
||||
|
||||
// FakeFilterPlugin is a configurable plugin used for testing
|
||||
type FakeFilterPlugin struct {
|
||||
PluginName string
|
||||
|
||||
// ReactionChain is the list of reactors that will be attempted for every
|
||||
// request in the order they are tried.
|
||||
ReactionChain []Reactor
|
||||
|
||||
args runtime.Object
|
||||
handle frameworktypes.Handle
|
||||
}
|
||||
|
||||
func NewFakeFilterPluginFncFromFake(fp *FakeFilterPlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeFilterPluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeFilterPluginArgs, got %T", args)
|
||||
}
|
||||
|
||||
fp.handle = handle
|
||||
fp.args = fakePluginArgs
|
||||
|
||||
return fp, nil
|
||||
}
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func NewFakeFilter(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeFilterPluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
}
|
||||
|
||||
ev := &FakeFilterPlugin{}
|
||||
ev.handle = handle
|
||||
ev.args = fakePluginArgs
|
||||
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
func (c *FakeFilterPlugin) AddReactor(extensionPoint string, reaction ReactionFunc) {
|
||||
c.ReactionChain = append(c.ReactionChain, &SimpleReactor{ExtensionPoint: extensionPoint, Reaction: reaction})
|
||||
}
|
||||
|
||||
// Name retrieves the plugin name
|
||||
func (d *FakeFilterPlugin) Name() string {
|
||||
return d.PluginName
|
||||
}
|
||||
|
||||
func (d *FakeFilterPlugin) Filter(pod *v1.Pod) bool {
|
||||
return d.handleBoolAction(&FilterActionImpl{
|
||||
ActionImpl: ActionImpl{
|
||||
handle: d.handle,
|
||||
extensionPoint: string(frameworktypes.FilterExtensionPoint),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (d *FakeFilterPlugin) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
return d.handleBoolAction(&PreEvictionFilterActionImpl{
|
||||
ActionImpl: ActionImpl{
|
||||
handle: d.handle,
|
||||
extensionPoint: string(frameworktypes.PreEvictionFilterExtensionPoint),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (d *FakeFilterPlugin) handleBoolAction(action Action) bool {
|
||||
actionCopy := action.DeepCopy()
|
||||
for _, reactor := range d.ReactionChain {
|
||||
if !reactor.Handles(actionCopy) {
|
||||
continue
|
||||
}
|
||||
handled, filter, _ := reactor.React(actionCopy)
|
||||
if !handled {
|
||||
continue
|
||||
}
|
||||
|
||||
return filter
|
||||
}
|
||||
panic(fmt.Errorf("unhandled %q action", action.GetExtensionPoint()))
|
||||
}
|
||||
|
||||
@@ -15,15 +15,17 @@ package plugin

import (
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/descheduler/pkg/framework"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

type Action interface {
Handle() framework.Handle
Handle() frameworktypes.Handle
GetExtensionPoint() string
DeepCopy() Action
}

type ReactionFunc func(action Action) (handled, filter bool, err error)

// Reactor is an interface to allow the composition of reaction functions.
type Reactor interface {
// Handles indicates whether or not this Reactor deals with a given
@@ -31,7 +33,8 @@ type Reactor interface {
Handles(action Action) bool
// React handles the action. It may choose to
// delegate by indicated handled=false.
React(action Action) (handled bool, err error)
// filter is used to store results of filter based actions
React(action Action) (handled, filter bool, err error)
}

// SimpleReactor is a Reactor. Each reaction function is attached to a given extensionPoint. "*" in either field matches everything for that value.
@@ -44,12 +47,10 @@ func (r *SimpleReactor) Handles(action Action) bool {
return r.ExtensionPoint == "*" || r.ExtensionPoint == action.GetExtensionPoint()
}

func (r *SimpleReactor) React(action Action) (bool, error) {
func (r *SimpleReactor) React(action Action) (bool, bool, error) {
return r.Reaction(action)
}

type ReactionFunc func(action Action) (handled bool, err error)

type DescheduleAction interface {
Action
CanDeschedule() bool
@@ -62,12 +63,22 @@ type BalanceAction interface {
|
||||
Nodes() []*v1.Node
|
||||
}
|
||||
|
||||
type FilterAction interface {
|
||||
Action
|
||||
CanFilter() bool
|
||||
}
|
||||
|
||||
type PreEvictionFilterAction interface {
|
||||
Action
|
||||
CanPreEvictionFilter() bool
|
||||
}
|
||||
|
||||
type ActionImpl struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
extensionPoint string
|
||||
}
|
||||
|
||||
func (a ActionImpl) Handle() framework.Handle {
|
||||
func (a ActionImpl) Handle() frameworktypes.Handle {
|
||||
return a.handle
|
||||
}
|
||||
|
||||
@@ -130,6 +141,30 @@ func (a BalanceActionImpl) DeepCopy() Action {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *FakePlugin) AddReactor(extensionPoint string, reaction ReactionFunc) {
|
||||
c.ReactionChain = append(c.ReactionChain, &SimpleReactor{ExtensionPoint: extensionPoint, Reaction: reaction})
|
||||
type FilterActionImpl struct {
|
||||
ActionImpl
|
||||
}
|
||||
|
||||
func (d FilterActionImpl) CanFilter() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (a FilterActionImpl) DeepCopy() Action {
|
||||
return FilterActionImpl{
|
||||
ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
|
||||
}
|
||||
}
|
||||
|
||||
type PreEvictionFilterActionImpl struct {
|
||||
ActionImpl
|
||||
}
|
||||
|
||||
func (d PreEvictionFilterActionImpl) CanPreEvictionFilter() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (a PreEvictionFilterActionImpl) DeepCopy() Action {
|
||||
return PreEvictionFilterActionImpl{
|
||||
ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,6 +25,81 @@ import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FakeBalancePluginArgs) DeepCopyInto(out *FakeBalancePluginArgs) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeBalancePluginArgs.
|
||||
func (in *FakeBalancePluginArgs) DeepCopy() *FakeBalancePluginArgs {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FakeBalancePluginArgs)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *FakeBalancePluginArgs) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FakeDeschedulePluginArgs) DeepCopyInto(out *FakeDeschedulePluginArgs) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeDeschedulePluginArgs.
|
||||
func (in *FakeDeschedulePluginArgs) DeepCopy() *FakeDeschedulePluginArgs {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FakeDeschedulePluginArgs)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *FakeDeschedulePluginArgs) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FakeFilterPluginArgs) DeepCopyInto(out *FakeFilterPluginArgs) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeFilterPluginArgs.
|
||||
func (in *FakeFilterPluginArgs) DeepCopy() *FakeFilterPluginArgs {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FakeFilterPluginArgs)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *FakeFilterPluginArgs) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FakePluginArgs) DeepCopyInto(out *FakePluginArgs) {
|
||||
*out = *in
|
||||
|
||||
@@ -19,13 +19,15 @@ package pluginregistry
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/framework"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

var PluginRegistry Registry

type PluginUtilities struct {
PluginBuilder PluginBuilder

PluginType interface{}
// Just an example instance of this PluginArg so we can avoid having
// to deal with reflect Types
PluginArgInstance runtime.Object
@@ -33,25 +35,34 @@ type PluginUtilities struct {
PluginArgDefaulter PluginArgDefaulter
}

type PluginBuilder = func(args runtime.Object, handle framework.Handle) (framework.Plugin, error)
type PluginBuilder = func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error)

type (
PluginArgValidator = func(args runtime.Object) error
PluginArgDefaulter = func(args runtime.Object)
)

type Registry = map[string]PluginUtilities
type Registry map[string]PluginUtilities

func NewRegistry() Registry {
return Registry{}
}

func Register(name string, builderFunc PluginBuilder, exampleArg runtime.Object, pluginArgValidator PluginArgValidator, pluginArgDefaulter PluginArgDefaulter, registry Registry) {
func Register(
name string,
builderFunc PluginBuilder,
pluginType interface{},
exampleArg runtime.Object,
pluginArgValidator PluginArgValidator,
pluginArgDefaulter PluginArgDefaulter,
registry Registry,
) {
if _, ok := registry[name]; ok {
klog.V(10).InfoS("Plugin already registered", "plugin", name)
} else {
registry[name] = PluginUtilities{
PluginBuilder: builderFunc,
PluginType: pluginType,
PluginArgInstance: exampleArg,
PluginArgValidator: pluginArgValidator,
PluginArgDefaulter: pluginArgDefaulter,

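For illustration only: the reworked Register above now accepts an example plugin instance (pluginType) in addition to the example args instance. Below is a toy, self-contained Go sketch that only mirrors the shape of that signature; the plugin names and the reduced parameter list are hypothetical, not the descheduler's real registration API.

package main

import "fmt"

// Toy shapes that only mirror the new Register signature shown above; the real
// descheduler types also carry builders, validators and defaulters.
type PluginUtilities struct {
	PluginType        interface{}
	PluginArgInstance interface{}
}

type Registry map[string]PluginUtilities

func Register(name string, pluginType, exampleArg interface{}, registry Registry) {
	if _, ok := registry[name]; ok {
		fmt.Println("plugin already registered:", name)
		return
	}
	registry[name] = PluginUtilities{PluginType: pluginType, PluginArgInstance: exampleArg}
}

// Hypothetical plugin and args types used purely for the example.
type RemoveDuplicates struct{}
type RemoveDuplicatesArgs struct{}

func main() {
	registry := Registry{}
	// Callers now hand over an example plugin instance in addition to the args instance,
	// so downstream code can detect the plugin's extension points from its type.
	Register("RemoveDuplicates", &RemoveDuplicates{}, &RemoveDuplicatesArgs{}, registry)
	fmt.Printf("%T\n", registry["RemoveDuplicates"].PluginType) // *main.RemoveDuplicates
}

Storing the instance is what lets the conversion code inspect it with type assertions instead of tracking extension points by hand.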
@@ -26,7 +26,7 @@ import (
"k8s.io/klog/v2"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils"
)

@@ -35,7 +35,7 @@ const (
evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
)

var _ framework.EvictorPlugin = &DefaultEvictor{}
var _ frameworktypes.EvictorPlugin = &DefaultEvictor{}

type constraint func(pod *v1.Pod) error

@@ -47,7 +47,7 @@ type constraint func(pod *v1.Pod) error
type DefaultEvictor struct {
args runtime.Object
constraints []constraint
handle framework.Handle
handle frameworktypes.Handle
}

// IsPodEvictableBasedOnPriority checks if the given pod is evictable based on priority resolved from pod Spec.
@@ -62,7 +62,7 @@ func HaveEvictAnnotation(pod *v1.Pod) bool {
}

// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
defaultEvictorArgs, ok := args.(*DefaultEvictorArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)

@@ -24,8 +24,8 @@ import (
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
@@ -347,7 +347,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
result := evictorPlugin.(framework.EvictorPlugin).PreEvictionFilter(test.pods[0])
|
||||
result := evictorPlugin.(frameworktypes.EvictorPlugin).PreEvictionFilter(test.pods[0])
|
||||
if (result) != test.result {
|
||||
t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
|
||||
}
|
||||
@@ -755,7 +755,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
result := evictorPlugin.(framework.EvictorPlugin).Filter(test.pods[0])
|
||||
result := evictorPlugin.(frameworktypes.EvictorPlugin).Filter(test.pods[0])
|
||||
if (result) != test.result {
|
||||
t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const HighNodeUtilizationPluginName = "HighNodeUtilization"
|
||||
@@ -37,15 +37,15 @@ const HighNodeUtilizationPluginName = "HighNodeUtilization"
|
||||
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
|
||||
|
||||
type HighNodeUtilization struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
args *HighNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
}
|
||||
|
||||
var _ framework.BalancePlugin = &HighNodeUtilization{}
|
||||
var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
|
||||
|
||||
// NewHighNodeUtilization builds plugin from its arguments while passing a handle
|
||||
func NewHighNodeUtilization(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
highNodeUtilizatioArgs, ok := args.(*HighNodeUtilizationArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
|
||||
@@ -71,7 +71,7 @@ func (h *HighNodeUtilization) Name() string {
|
||||
}
|
||||
|
||||
// Balance extension point implementation for the plugin
|
||||
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
thresholds := h.args.Thresholds
|
||||
targetThresholds := make(api.ResourceThresholds)
|
||||
|
||||
|
||||
@@ -33,9 +33,9 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
@@ -519,7 +519,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
@@ -530,7 +530,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, testCase.nodes)
|
||||
plugin.(frameworktypes.BalancePlugin).Balance(ctx, testCase.nodes)
|
||||
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if testCase.expectedPodsEvicted != podsEvicted {
|
||||
@@ -670,7 +670,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
@@ -683,7 +683,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, item.nodes)
|
||||
plugin.(frameworktypes.BalancePlugin).Balance(ctx, item.nodes)
|
||||
|
||||
if item.evictionsExpected != podEvictor.TotalEvicted() {
|
||||
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
|
||||
|
||||
@@ -26,7 +26,7 @@ import (
|
||||
"k8s.io/klog/v2"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const LowNodeUtilizationPluginName = "LowNodeUtilization"
|
||||
@@ -35,15 +35,15 @@ const LowNodeUtilizationPluginName = "LowNodeUtilization"
|
||||
// to calculate nodes' utilization and not the actual resource usage.
|
||||
|
||||
type LowNodeUtilization struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
args *LowNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
}
|
||||
|
||||
var _ framework.BalancePlugin = &LowNodeUtilization{}
|
||||
var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
|
||||
|
||||
// NewLowNodeUtilization builds plugin from its arguments while passing a handle
|
||||
func NewLowNodeUtilization(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
lowNodeUtilizationArgsArgs, ok := args.(*LowNodeUtilizationArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
|
||||
@@ -69,7 +69,7 @@ func (l *LowNodeUtilization) Name() string {
|
||||
}
|
||||
|
||||
// Balance extension point implementation for the plugin
|
||||
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
useDeviationThresholds := l.args.UseDeviationThresholds
|
||||
thresholds := l.args.Thresholds
|
||||
targetThresholds := l.args.TargetThresholds
|
||||
|
||||
@@ -22,9 +22,9 @@ import (
|
||||
"testing"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
@@ -921,7 +921,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
@@ -935,7 +935,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, test.nodes)
|
||||
plugin.(frameworktypes.BalancePlugin).Balance(ctx, test.nodes)
|
||||
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if test.expectedPodsEvicted != podsEvicted {
|
||||
@@ -1093,7 +1093,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
@@ -1109,7 +1109,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, item.nodes)
|
||||
plugin.(frameworktypes.BalancePlugin).Balance(ctx, item.nodes)
|
||||
|
||||
if item.evictionsExpected != podEvictor.TotalEvicted() {
|
||||
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -215,7 +215,7 @@ func evictPodsFromSourceNodes(
|
||||
ctx context.Context,
|
||||
evictableNamespaces *api.Namespaces,
|
||||
sourceNodes, destinationNodes []NodeInfo,
|
||||
podEvictor framework.Evictor,
|
||||
podEvictor frameworktypes.Evictor,
|
||||
podFilter func(pod *v1.Pod) bool,
|
||||
resourceNames []v1.ResourceName,
|
||||
continueEviction continueEvictionCond,
|
||||
@@ -279,7 +279,7 @@ func evictPods(
|
||||
nodeInfo NodeInfo,
|
||||
totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
|
||||
taintsOfLowNodes map[string][]v1.Taint,
|
||||
podEvictor framework.Evictor,
|
||||
podEvictor frameworktypes.Evictor,
|
||||
continueEviction continueEvictionCond,
|
||||
) {
|
||||
var excludedNamespaces sets.String
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
@@ -33,17 +33,17 @@ import (
|
||||
|
||||
const PluginName = "PodLifeTime"
|
||||
|
||||
var _ framework.DeschedulePlugin = &PodLifeTime{}
|
||||
var _ frameworktypes.DeschedulePlugin = &PodLifeTime{}
|
||||
|
||||
// PodLifeTime evicts pods on the node that violate the max pod lifetime threshold
|
||||
type PodLifeTime struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
args *PodLifeTimeArgs
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
podLifeTimeArgs, ok := args.(*PodLifeTimeArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type PodLifeTimeArgs, got %T", args)
|
||||
@@ -101,7 +101,7 @@ func (d *PodLifeTime) Name() string {
|
||||
}
|
||||
|
||||
// Deschedule extension point implementation for the plugin
|
||||
func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
podsToEvict := make([]*v1.Pod, 0)
|
||||
nodeMap := make(map[string]*v1.Node, len(nodes))
|
||||
|
||||
@@ -110,7 +110,7 @@ func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framewo
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
// no pods evicted as error encountered retrieving evictable Pods
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing pods on a node: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,9 +30,9 @@ import (
|
||||
"k8s.io/client-go/tools/events"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
@@ -378,14 +378,14 @@ func TestPodLifeTime(t *testing.T) {
|
||||
plugin, err := New(tc.args, &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
|
||||
plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
|
||||
|
||||
@@ -34,7 +34,7 @@ import (
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const PluginName = "RemoveDuplicates"
|
||||
@@ -45,12 +45,12 @@ const PluginName = "RemoveDuplicates"
|
||||
// As of now, this plugin won't evict daemonsets, mirror pods, critical pods and pods with local storages.
|
||||
|
||||
type RemoveDuplicates struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
args *RemoveDuplicatesArgs
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
var _ framework.BalancePlugin = &RemoveDuplicates{}
|
||||
var _ frameworktypes.BalancePlugin = &RemoveDuplicates{}
|
||||
|
||||
type podOwner struct {
|
||||
namespace, kind, name string
|
||||
@@ -58,7 +58,7 @@ type podOwner struct {
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
removeDuplicatesArgs, ok := args.(*RemoveDuplicatesArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemoveDuplicatesArgs, got %T", args)
|
||||
@@ -93,7 +93,7 @@ func (r *RemoveDuplicates) Name() string {
|
||||
}
|
||||
|
||||
// Balance extension point implementation for the plugin
|
||||
func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
duplicatePods := make(map[podOwner]map[string][]*v1.Pod)
|
||||
ownerKeyOccurence := make(map[podOwner]int32)
|
||||
nodeCount := 0
|
||||
|
||||
@@ -21,9 +21,9 @@ import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/client-go/tools/events"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
@@ -347,7 +347,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
@@ -360,7 +360,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, testCase.nodes)
|
||||
plugin.(frameworktypes.BalancePlugin).Balance(ctx, testCase.nodes)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != testCase.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", testCase.description, actualEvictedPodCount, testCase.expectedEvictedPodCount)
|
||||
@@ -796,7 +796,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
@@ -807,7 +807,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, testCase.nodes)
|
||||
plugin.(frameworktypes.BalancePlugin).Balance(ctx, testCase.nodes)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != testCase.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", testCase.description, actualEvictedPodCount, testCase.expectedEvictedPodCount)
|
||||
|
||||
@@ -29,22 +29,22 @@ import (
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const PluginName = "RemoveFailedPods"
|
||||
|
||||
// RemoveFailedPods evicts pods in failed status phase that match the given args criteria
|
||||
type RemoveFailedPods struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
args *RemoveFailedPodsArgs
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
var _ framework.DeschedulePlugin = &RemoveFailedPods{}
|
||||
var _ frameworktypes.DeschedulePlugin = &RemoveFailedPods{}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
failedPodsArgs, ok := args.(*RemoveFailedPodsArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemoveFailedPodsArgs, got %T", args)
|
||||
@@ -91,13 +91,13 @@ func (d *RemoveFailedPods) Name() string {
|
||||
}
|
||||
|
||||
// Deschedule extension point implementation for the plugin
|
||||
func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
// no pods evicted as error encountered retrieving evictable Pods
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing pods on a node: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ import (
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
@@ -331,7 +331,7 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
},
|
||||
@@ -340,7 +340,7 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
|
||||
plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||
|
||||
@@ -27,7 +27,7 @@ import (
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const PluginName = "RemovePodsHavingTooManyRestarts"
|
||||
@@ -36,15 +36,15 @@ const PluginName = "RemovePodsHavingTooManyRestarts"
|
||||
// There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
|
||||
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
|
||||
type RemovePodsHavingTooManyRestarts struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
args *RemovePodsHavingTooManyRestartsArgs
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
var _ framework.DeschedulePlugin = &RemovePodsHavingTooManyRestarts{}
|
||||
var _ frameworktypes.DeschedulePlugin = &RemovePodsHavingTooManyRestarts{}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
tooManyRestartsArgs, ok := args.(*RemovePodsHavingTooManyRestartsArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemovePodsHavingTooManyRestartsArgs, got %T", args)
|
||||
@@ -88,13 +88,13 @@ func (d *RemovePodsHavingTooManyRestarts) Name() string {
|
||||
}
|
||||
|
||||
// Deschedule extension point implementation for the plugin
|
||||
func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
// no pods evicted as error encountered retrieving evictable Pods
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing pods on a node: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,9 +31,9 @@ import (
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
@@ -287,7 +287,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
})
|
||||
@@ -295,7 +295,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
|
||||
plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -36,15 +36,15 @@ const PluginName = "RemovePodsViolatingInterPodAntiAffinity"
|
||||
|
||||
// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which violate inter pod anti affinity
|
||||
type RemovePodsViolatingInterPodAntiAffinity struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
args *RemovePodsViolatingInterPodAntiAffinityArgs
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
var _ framework.DeschedulePlugin = &RemovePodsViolatingInterPodAntiAffinity{}
|
||||
var _ frameworktypes.DeschedulePlugin = &RemovePodsViolatingInterPodAntiAffinity{}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
interPodAntiAffinityArgs, ok := args.(*RemovePodsViolatingInterPodAntiAffinityArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingInterPodAntiAffinityArgs, got %T", args)
|
||||
@@ -77,13 +77,13 @@ func (d *RemovePodsViolatingInterPodAntiAffinity) Name() string {
|
||||
return PluginName
|
||||
}
|
||||
|
||||
func (d *RemovePodsViolatingInterPodAntiAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d *RemovePodsViolatingInterPodAntiAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
loop:
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing pods: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ import (
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
@@ -251,14 +251,14 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
}
|
||||
plugin, err := New(
|
||||
&RemovePodsViolatingInterPodAntiAffinityArgs{},
|
||||
handle,
|
||||
)
|
||||
|
||||
plugin.(framework.DeschedulePlugin).Deschedule(ctx, test.nodes)
|
||||
plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, test.nodes)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != test.expectedEvictedPodCount {
|
||||
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
|
||||
|
||||
@@ -25,22 +25,22 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const PluginName = "RemovePodsViolatingNodeAffinity"
|
||||
|
||||
// RemovePodsViolatingNodeAffinity evicts pods on the node which violate node affinity
|
||||
type RemovePodsViolatingNodeAffinity struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
args *RemovePodsViolatingNodeAffinityArgs
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
var _ framework.DeschedulePlugin = &RemovePodsViolatingNodeAffinity{}
|
||||
var _ frameworktypes.DeschedulePlugin = &RemovePodsViolatingNodeAffinity{}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
nodeAffinityArgs, ok := args.(*RemovePodsViolatingNodeAffinityArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeAffinityArgs, got %T", args)
|
||||
@@ -75,7 +75,7 @@ func (d *RemovePodsViolatingNodeAffinity) Name() string {
|
||||
return PluginName
|
||||
}
|
||||
|
||||
func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
for _, nodeAffinity := range d.args.NodeAffinityType {
|
||||
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
|
||||
|
||||
@@ -94,7 +94,7 @@ func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing pods on a node: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ import (
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
@@ -251,7 +251,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
}
|
||||
|
||||
plugin, err := New(
|
||||
@@ -264,7 +264,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
|
||||
plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||
|
||||
@@ -27,7 +27,7 @@ import (
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -35,16 +35,16 @@ const PluginName = "RemovePodsViolatingNodeTaints"
|
||||
|
||||
// RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
|
||||
type RemovePodsViolatingNodeTaints struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
args *RemovePodsViolatingNodeTaintsArgs
|
||||
taintFilterFnc func(taint *v1.Taint) bool
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
var _ framework.DeschedulePlugin = &RemovePodsViolatingNodeTaints{}
|
||||
var _ frameworktypes.DeschedulePlugin = &RemovePodsViolatingNodeTaints{}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
nodeTaintsArgs, ok := args.(*RemovePodsViolatingNodeTaintsArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeTaintsArgs, got %T", args)
|
||||
@@ -94,13 +94,13 @@ func (d *RemovePodsViolatingNodeTaints) Name() string {
|
||||
}
|
||||
|
||||
// Deschedule extension point implementation for the plugin
|
||||
func (d *RemovePodsViolatingNodeTaints) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d *RemovePodsViolatingNodeTaints) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
// no pods evicted as error encountered retrieving evictable Pods
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing pods on a node: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,9 +31,9 @@ import (
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
@@ -386,7 +386,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
@@ -400,7 +400,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
|
||||
plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -49,15 +49,15 @@ type topology struct {
|
||||
|
||||
// RemovePodsViolatingTopologySpreadConstraint evicts pods which violate their topology spread constraints
|
||||
type RemovePodsViolatingTopologySpreadConstraint struct {
|
||||
handle framework.Handle
|
||||
handle frameworktypes.Handle
|
||||
args *RemovePodsViolatingTopologySpreadConstraintArgs
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
var _ framework.BalancePlugin = &RemovePodsViolatingTopologySpreadConstraint{}
|
||||
var _ frameworktypes.BalancePlugin = &RemovePodsViolatingTopologySpreadConstraint{}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
pluginArgs, ok := args.(*RemovePodsViolatingTopologySpreadConstraintArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingTopologySpreadConstraintArgs, got %T", args)
|
||||
@@ -84,7 +84,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Name() string {
|
||||
}
|
||||
|
||||
// nolint: gocyclo
|
||||
func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
nodeMap := make(map[string]*v1.Node, len(nodes))
|
||||
for _, node := range nodes {
|
||||
nodeMap[node.Name] = node
|
||||
@@ -107,7 +107,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
|
||||
namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Couldn't list namespaces")
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("list namespace: %w", err),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,9 +18,9 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
@@ -1196,7 +1196,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
@@ -1208,7 +1208,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, tc.nodes)
|
||||
plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != tc.expectedEvictedCount {
|
||||
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
|
||||
|
||||
@@ -22,12 +22,12 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
@@ -37,20 +37,21 @@ import (
|
||||
// evictorImpl implements the Evictor interface so plugins
|
||||
// can evict a pod without importing a specific pod evictor
|
||||
type evictorImpl struct {
|
||||
podEvictor *evictions.PodEvictor
|
||||
evictorFilter framework.EvictorPlugin
|
||||
podEvictor *evictions.PodEvictor
|
||||
filter podutil.FilterFunc
|
||||
preEvictionFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
var _ framework.Evictor = &evictorImpl{}
|
||||
var _ frameworktypes.Evictor = &evictorImpl{}
|
||||
|
||||
// Filter checks if a pod can be evicted
|
||||
func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
|
||||
return ei.evictorFilter.Filter(pod)
|
||||
return ei.filter(pod)
|
||||
}
|
||||
|
||||
// PreEvictionFilter checks if pod can be evicted right before eviction
|
||||
func (ei *evictorImpl) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
return ei.evictorFilter.PreEvictionFilter(pod)
|
||||
return ei.preEvictionFilter(pod)
|
||||
}
|
||||
|
||||
// Evict evicts a pod (no pre-check performed)
|
||||
@@ -70,7 +71,7 @@ type handleImpl struct {
|
||||
evictor *evictorImpl
|
||||
}
|
||||
|
||||
var _ framework.Handle = &handleImpl{}
|
||||
var _ frameworktypes.Handle = &handleImpl{}
|
||||
|
||||
// ClientSet retrieves kube client set
|
||||
func (hi *handleImpl) ClientSet() clientset.Interface {
|
||||
@@ -88,16 +89,34 @@ func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
}

// Evictor retrieves evictor so plugins can filter and evict pods
func (hi *handleImpl) Evictor() framework.Evictor {
func (hi *handleImpl) Evictor() frameworktypes.Evictor {
return hi.evictor
}

type filterPlugin interface {
frameworktypes.Plugin
Filter(pod *v1.Pod) bool
}

type preEvictionFilterPlugin interface {
frameworktypes.Plugin
PreEvictionFilter(pod *v1.Pod) bool
}

type profileImpl struct {
profileName string
podEvictor *evictions.PodEvictor

deschedulePlugins []framework.DeschedulePlugin
balancePlugins []framework.BalancePlugin
deschedulePlugins []frameworktypes.DeschedulePlugin
balancePlugins []frameworktypes.BalancePlugin
filterPlugins []filterPlugin
preEvictionFilterPlugins []preEvictionFilterPlugin

// Each extension point with a list of plugins implementing the extension point.
deschedule sets.String
balance sets.String
filter sets.String
preEvictionFilter sets.String
}

// Option for the handleImpl.
@@ -144,7 +163,7 @@ func getPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) (*api.
return nil, 0
}

func buildPlugin(config api.DeschedulerProfile, pluginName string, handle *handleImpl, reg pluginregistry.Registry) (framework.Plugin, error) {
func buildPlugin(config api.DeschedulerProfile, pluginName string, handle *handleImpl, reg pluginregistry.Registry) (frameworktypes.Plugin, error) {
pc, _ := getPluginConfig(pluginName, config.PluginConfigs)
if pc == nil {
klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", pluginName, "profile", config.Name)
@@ -164,6 +183,26 @@ func buildPlugin(config api.DeschedulerProfile, pluginName string, handle *handl
return pg, nil
}

func (p *profileImpl) registryToExtensionPoints(registry pluginregistry.Registry) {
p.deschedule = sets.NewString()
p.balance = sets.NewString()
p.filter = sets.NewString()
p.preEvictionFilter = sets.NewString()

for plugin, pluginUtilities := range registry {
if _, ok := pluginUtilities.PluginType.(frameworktypes.DeschedulePlugin); ok {
p.deschedule.Insert(plugin)
}
if _, ok := pluginUtilities.PluginType.(frameworktypes.BalancePlugin); ok {
p.balance.Insert(plugin)
}
if _, ok := pluginUtilities.PluginType.(frameworktypes.EvictorPlugin); ok {
p.filter.Insert(plugin)
p.preEvictionFilter.Insert(plugin)
}
}
}
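registryToExtensionPoints derives the supported extension points purely from the interfaces each registered plugin type satisfies: a plugin implementing both DeschedulePlugin and BalancePlugin ends up in both sets, and every EvictorPlugin is listed under filter and preEvictionFilter alike. A rough in-package sketch of the effect, reusing the fakeplugin and defaultevictor registrations that appear in the tests further down in this diff:

    registry := pluginregistry.NewRegistry()

    // Balance-only fake plugin (registration call copied from the tests below).
    pluginregistry.Register(
    	"BalancePlugin_0",
    	fakeplugin.NewFakeBalancePluginFncFromFake(&fakeplugin.FakeBalancePlugin{PluginName: "BalancePlugin_0"}),
    	&fakeplugin.FakeBalancePlugin{},
    	&fakeplugin.FakeBalancePluginArgs{},
    	fakeplugin.ValidateFakePluginArgs,
    	fakeplugin.SetDefaults_FakePluginArgs,
    	registry,
    )

    // The default evictor implements the EvictorPlugin interface.
    pluginregistry.Register(
    	defaultevictor.PluginName,
    	defaultevictor.New,
    	&defaultevictor.DefaultEvictor{},
    	&defaultevictor.DefaultEvictorArgs{},
    	defaultevictor.ValidateDefaultEvictorArgs,
    	defaultevictor.SetDefaults_DefaultEvictorArgs,
    	registry,
    )

    p := &profileImpl{}
    p.registryToExtensionPoints(registry)
    // p.balance           -> {"BalancePlugin_0"}
    // p.filter            -> {"DefaultEvictor"}
    // p.preEvictionFilter -> {"DefaultEvictor"}
    // p.deschedule        -> {} (nothing registered implements DeschedulePlugin)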
func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts ...Option) (*profileImpl, error) {
|
||||
hOpts := &handleImplOpts{}
|
||||
for _, optFnc := range opts {
|
||||
@@ -182,16 +221,27 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
|
||||
return nil, fmt.Errorf("podEvictor missing")
|
||||
}
|
||||
|
||||
evictorPlugin, err := buildPlugin(config, defaultevictor.PluginName, &handleImpl{
|
||||
clientSet: hOpts.clientSet,
|
||||
getPodsAssignedToNodeFunc: hOpts.getPodsAssignedToNodeFunc,
|
||||
sharedInformerFactory: hOpts.sharedInformerFactory,
|
||||
}, reg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to build %v plugin: %v", defaultevictor.PluginName, err)
|
||||
pi := &profileImpl{
|
||||
profileName: config.Name,
|
||||
podEvictor: hOpts.podEvictor,
|
||||
deschedulePlugins: []frameworktypes.DeschedulePlugin{},
|
||||
balancePlugins: []frameworktypes.BalancePlugin{},
|
||||
filterPlugins: []filterPlugin{},
|
||||
preEvictionFilterPlugins: []preEvictionFilterPlugin{},
|
||||
}
|
||||
if evictorPlugin == nil {
|
||||
return nil, fmt.Errorf("empty plugin build for %v plugin: %v", defaultevictor.PluginName, err)
|
||||
pi.registryToExtensionPoints(reg)
|
||||
|
||||
if !pi.deschedule.HasAll(config.Plugins.Deschedule.Enabled...) {
|
||||
return nil, fmt.Errorf("profile %q configures deschedule extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.Deschedule.Enabled...).Difference(pi.deschedule))
|
||||
}
|
||||
if !pi.balance.HasAll(config.Plugins.Balance.Enabled...) {
|
||||
return nil, fmt.Errorf("profile %q configures balance extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.Balance.Enabled...).Difference(pi.balance))
|
||||
}
|
||||
if !pi.filter.HasAll(config.Plugins.Filter.Enabled...) {
|
||||
return nil, fmt.Errorf("profile %q configures filter extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.Filter.Enabled...).Difference(pi.filter))
|
||||
}
|
||||
if !pi.preEvictionFilter.HasAll(config.Plugins.PreEvictionFilter.Enabled...) {
|
||||
return nil, fmt.Errorf("profile %q configures preEvictionFilter extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.PreEvictionFilter.Enabled...).Difference(pi.preEvictionFilter))
|
||||
}
|
||||
|
||||
handle := &handleImpl{
|
||||
@@ -199,59 +249,55 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
|
||||
getPodsAssignedToNodeFunc: hOpts.getPodsAssignedToNodeFunc,
|
||||
sharedInformerFactory: hOpts.sharedInformerFactory,
|
||||
evictor: &evictorImpl{
|
||||
podEvictor: hOpts.podEvictor,
|
||||
evictorFilter: evictorPlugin.(framework.EvictorPlugin),
|
||||
podEvictor: hOpts.podEvictor,
|
||||
},
|
||||
}
|
||||
|
||||
deschedulePlugins := []framework.DeschedulePlugin{}
|
||||
balancePlugins := []framework.BalancePlugin{}
|
||||
pluginNames := append(config.Plugins.Deschedule.Enabled, config.Plugins.Balance.Enabled...)
|
||||
pluginNames = append(pluginNames, config.Plugins.Filter.Enabled...)
|
||||
pluginNames = append(pluginNames, config.Plugins.PreEvictionFilter.Enabled...)
|
||||
|
||||
descheduleEnabled := make(map[string]struct{})
|
||||
balanceEnabled := make(map[string]struct{})
|
||||
for _, name := range config.Plugins.Deschedule.Enabled {
|
||||
descheduleEnabled[name] = struct{}{}
|
||||
}
|
||||
for _, name := range config.Plugins.Balance.Enabled {
|
||||
balanceEnabled[name] = struct{}{}
|
||||
}
|
||||
|
||||
// Assuming only a list of enabled extension points.
|
||||
// Later, when a default list of plugins and their extension points is established,
|
||||
// compute the list of enabled extension points as (DefaultEnabled + Enabled - Disabled)
|
||||
for _, plugin := range append(config.Plugins.Deschedule.Enabled, config.Plugins.Balance.Enabled...) {
|
||||
plugins := make(map[string]frameworktypes.Plugin)
|
||||
for _, plugin := range sets.NewString(pluginNames...).List() {
|
||||
pg, err := buildPlugin(config, plugin, handle, reg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to build %v plugin: %v", plugin, err)
|
||||
}
|
||||
if pg != nil {
|
||||
// pg can be of any of each type, or both
|
||||
|
||||
if _, exists := descheduleEnabled[plugin]; exists {
|
||||
_, ok := pg.(framework.DeschedulePlugin)
|
||||
if ok {
|
||||
deschedulePlugins = append(deschedulePlugins, pg.(framework.DeschedulePlugin))
|
||||
}
|
||||
}
|
||||
|
||||
if _, exists := balanceEnabled[plugin]; exists {
|
||||
_, ok := pg.(framework.BalancePlugin)
|
||||
if ok {
|
||||
balancePlugins = append(balancePlugins, pg.(framework.BalancePlugin))
|
||||
}
|
||||
}
|
||||
if pg == nil {
|
||||
return nil, fmt.Errorf("got empty %v plugin build", plugin)
|
||||
}
|
||||
plugins[plugin] = pg
|
||||
}
|
||||
|
||||
return &profileImpl{
|
||||
profileName: config.Name,
|
||||
podEvictor: hOpts.podEvictor,
|
||||
deschedulePlugins: deschedulePlugins,
|
||||
balancePlugins: balancePlugins,
|
||||
}, nil
|
||||
// Later, when a default list of plugins and their extension points is established,
|
||||
// compute the list of enabled extension points as (DefaultEnabled + Enabled - Disabled)
|
||||
for _, pluginName := range config.Plugins.Deschedule.Enabled {
|
||||
pi.deschedulePlugins = append(pi.deschedulePlugins, plugins[pluginName].(frameworktypes.DeschedulePlugin))
|
||||
}
|
||||
|
||||
for _, pluginName := range config.Plugins.Balance.Enabled {
|
||||
pi.balancePlugins = append(pi.balancePlugins, plugins[pluginName].(frameworktypes.BalancePlugin))
|
||||
}
|
||||
|
||||
filters := []podutil.FilterFunc{}
|
||||
for _, pluginName := range config.Plugins.Filter.Enabled {
|
||||
pi.filterPlugins = append(pi.filterPlugins, plugins[pluginName].(filterPlugin))
|
||||
filters = append(filters, plugins[pluginName].(filterPlugin).Filter)
|
||||
}
|
||||
|
||||
preEvictionFilters := []podutil.FilterFunc{}
|
||||
for _, pluginName := range config.Plugins.PreEvictionFilter.Enabled {
|
||||
pi.preEvictionFilterPlugins = append(pi.preEvictionFilterPlugins, plugins[pluginName].(preEvictionFilterPlugin))
|
||||
preEvictionFilters = append(preEvictionFilters, plugins[pluginName].(preEvictionFilterPlugin).PreEvictionFilter)
|
||||
}
|
||||
|
||||
handle.evictor.filter = podutil.WrapFilterFuncs(filters...)
|
||||
handle.evictor.preEvictionFilter = podutil.WrapFilterFuncs(preEvictionFilters...)
|
||||
|
||||
return pi, nil
|
||||
}
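In the rewritten NewProfile above, the enabled Filter and PreEvictionFilter plugins are no longer reached through a single evictorFilter plugin; their Filter and PreEvictionFilter methods are collected and composed with podutil.WrapFilterFuncs into the two FilterFuncs stored on evictorImpl. The sketch below shows the AND-style composition this is assumed to perform (a hypothetical stand-in, not the actual WrapFilterFuncs implementation): a pod is considered evictable only if every enabled filter accepts it.

    // chainFilters is a hypothetical stand-in for podutil.WrapFilterFuncs used
    // above: the composed filter accepts a pod only if every member accepts it.
    func chainFilters(filters ...func(pod *v1.Pod) bool) func(pod *v1.Pod) bool {
    	return func(pod *v1.Pod) bool {
    		for _, f := range filters {
    			if f != nil && !f(pod) {
    				return false
    			}
    		}
    		return true
    	}
    }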
func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
errs := []error{}
|
||||
for _, pl := range d.deschedulePlugins {
|
||||
evicted := d.podEvictor.TotalEvicted()
|
||||
@@ -272,15 +318,15 @@ func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node)
|
||||
|
||||
aggrErr := errors.NewAggregate(errs)
|
||||
if aggrErr == nil {
|
||||
return &framework.Status{}
|
||||
return &frameworktypes.Status{}
|
||||
}
|
||||
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("%v", aggrErr.Error()),
|
||||
}
|
||||
}
|
||||
|
||||
func (d profileImpl) RunBalancePlugins(ctx context.Context, nodes []*v1.Node) *framework.Status {
|
||||
func (d profileImpl) RunBalancePlugins(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
errs := []error{}
|
||||
for _, pl := range d.balancePlugins {
|
||||
evicted := d.podEvictor.TotalEvicted()
|
||||
@@ -301,10 +347,10 @@ func (d profileImpl) RunBalancePlugins(ctx context.Context, nodes []*v1.Node) *f
|
||||
|
||||
aggrErr := errors.NewAggregate(errs)
|
||||
if aggrErr == nil {
|
||||
return &framework.Status{}
|
||||
return &frameworktypes.Status{}
|
||||
}
|
||||
|
||||
return &framework.Status{
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("%v", aggrErr.Error()),
|
||||
}
|
||||
}
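With the extension points computed from the registry, NewProfile can reject a profile whose Enabled lists reference an extension point a plugin does not implement, instead of silently skipping it. A usage sketch (assuming the registrations and the client, shared informer factory, pod evictor and getPodsAssignedToNode wiring from the tests below): enabling a Balance-only plugin under Deschedule now fails while the profile is being built.

    _, err := NewProfile(
    	api.DeschedulerProfile{
    		Name: "invalid-profile",
    		Plugins: api.Plugins{
    			// BalancePlugin_0 only implements the Balance extension point.
    			Deschedule: api.PluginSet{Enabled: []string{"BalancePlugin_0"}},
    			Evict:      api.PluginSet{Enabled: []string{defaultevictor.PluginName}},
    		},
    	},
    	pluginregistry.PluginRegistry,
    	WithClientSet(client),
    	WithSharedInformerFactory(sharedInformerFactory),
    	WithPodEvictor(podEvictor),
    	WithGetPodsAssignedToNodeFnc(getPodsAssignedToNode),
    )
    // err is non-nil: the profile configures the deschedule extension point
    // with a plugin that does not implement it.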
@@ -3,12 +3,16 @@ package profile
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/informers"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
@@ -16,19 +20,19 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
fakeplugin "sigs.k8s.io/descheduler/pkg/framework/fake/plugin"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
testutils "sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func TestProfileTopExtensionPoints(t *testing.T) {
|
||||
func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
config api.DeschedulerProfile
|
||||
extensionPoint framework.ExtensionPoint
|
||||
extensionPoint frameworktypes.ExtensionPoint
|
||||
expectedEviction bool
|
||||
}{
|
||||
{
|
||||
@@ -58,7 +62,7 @@ func TestProfileTopExtensionPoints(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
extensionPoint: framework.DescheduleExtensionPoint,
|
||||
extensionPoint: frameworktypes.DescheduleExtensionPoint,
|
||||
expectedEviction: true,
|
||||
},
|
||||
{
|
||||
@@ -88,7 +92,7 @@ func TestProfileTopExtensionPoints(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
extensionPoint: framework.BalanceExtensionPoint,
|
||||
extensionPoint: frameworktypes.BalanceExtensionPoint,
|
||||
expectedEviction: true,
|
||||
},
|
||||
{
|
||||
@@ -118,7 +122,7 @@ func TestProfileTopExtensionPoints(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
extensionPoint: framework.DescheduleExtensionPoint,
|
||||
extensionPoint: frameworktypes.DescheduleExtensionPoint,
|
||||
expectedEviction: false,
|
||||
},
|
||||
{
|
||||
@@ -148,7 +152,7 @@ func TestProfileTopExtensionPoints(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
extensionPoint: framework.BalanceExtensionPoint,
|
||||
extensionPoint: frameworktypes.BalanceExtensionPoint,
|
||||
expectedEviction: false,
|
||||
},
|
||||
}
|
||||
@@ -166,26 +170,26 @@ func TestProfileTopExtensionPoints(t *testing.T) {
|
||||
p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{}}
|
||||
|
||||
fakePlugin := fakeplugin.FakePlugin{}
|
||||
if test.extensionPoint == framework.DescheduleExtensionPoint {
|
||||
fakePlugin.AddReactor(string(framework.DescheduleExtensionPoint), func(action fakeplugin.Action) (handled bool, err error) {
|
||||
if test.extensionPoint == frameworktypes.DescheduleExtensionPoint {
|
||||
fakePlugin.AddReactor(string(frameworktypes.DescheduleExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
|
||||
if dAction, ok := action.(fakeplugin.DescheduleAction); ok {
|
||||
if dAction.Handle().Evictor().Evict(ctx, p1, evictions.EvictOptions{}) {
|
||||
return true, nil
|
||||
return true, false, nil
|
||||
}
|
||||
return true, fmt.Errorf("pod not evicted")
|
||||
return true, false, fmt.Errorf("pod not evicted")
|
||||
}
|
||||
return false, nil
|
||||
return false, false, nil
|
||||
})
|
||||
}
|
||||
if test.extensionPoint == framework.BalanceExtensionPoint {
|
||||
fakePlugin.AddReactor(string(framework.BalanceExtensionPoint), func(action fakeplugin.Action) (handled bool, err error) {
|
||||
if test.extensionPoint == frameworktypes.BalanceExtensionPoint {
|
||||
fakePlugin.AddReactor(string(frameworktypes.BalanceExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
|
||||
if dAction, ok := action.(fakeplugin.BalanceAction); ok {
|
||||
if dAction.Handle().Evictor().Evict(ctx, p1, evictions.EvictOptions{}) {
|
||||
return true, nil
|
||||
return true, false, nil
|
||||
}
|
||||
return true, fmt.Errorf("pod not evicted")
|
||||
return true, false, fmt.Errorf("pod not evicted")
|
||||
}
|
||||
return false, nil
|
||||
return false, false, nil
|
||||
})
|
||||
}
|
||||
|
||||
@@ -193,6 +197,7 @@ func TestProfileTopExtensionPoints(t *testing.T) {
|
||||
pluginregistry.Register(
|
||||
"FakePlugin",
|
||||
fakeplugin.NewPluginFncFromFake(&fakePlugin),
|
||||
&fakeplugin.FakePlugin{},
|
||||
&fakeplugin.FakePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
@@ -202,6 +207,7 @@ func TestProfileTopExtensionPoints(t *testing.T) {
|
||||
pluginregistry.Register(
|
||||
defaultevictor.PluginName,
|
||||
defaultevictor.New,
|
||||
&defaultevictor.DefaultEvictor{},
|
||||
&defaultevictor.DefaultEvictorArgs{},
|
||||
defaultevictor.ValidateDefaultEvictorArgs,
|
||||
defaultevictor.SetDefaults_DefaultEvictorArgs,
|
||||
@@ -240,11 +246,11 @@ func TestProfileTopExtensionPoints(t *testing.T) {
|
||||
t.Fatalf("unable to create %q profile: %v", test.config.Name, err)
|
||||
}
|
||||
|
||||
var status *framework.Status
|
||||
var status *frameworktypes.Status
|
||||
switch test.extensionPoint {
|
||||
case framework.DescheduleExtensionPoint:
|
||||
case frameworktypes.DescheduleExtensionPoint:
|
||||
status = prfl.RunDeschedulePlugins(ctx, nodes)
|
||||
case framework.BalanceExtensionPoint:
|
||||
case frameworktypes.BalanceExtensionPoint:
|
||||
status = prfl.RunBalancePlugins(ctx, nodes)
|
||||
default:
|
||||
t.Fatalf("unknown %q extension point", test.extensionPoint)
|
||||
@@ -281,3 +287,400 @@ func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (boo
|
||||
return false, nil, nil // fallback to the default reactor
|
||||
}
|
||||
}
|
||||
|
||||
func TestProfileExtensionPoints(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
n1 := testutils.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := testutils.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
nodes := []*v1.Node{n1, n2}
|
||||
|
||||
p1 := testutils.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
|
||||
p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{}}
|
||||
|
||||
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
fakePluginName := fmt.Sprintf("FakePlugin_%v", i)
|
||||
deschedulePluginName := fmt.Sprintf("DeschedulePlugin_%v", i)
|
||||
balancePluginName := fmt.Sprintf("BalancePlugin_%v", i)
|
||||
filterPluginName := fmt.Sprintf("FilterPlugin_%v", i)
|
||||
|
||||
fakePlugin := &fakeplugin.FakePlugin{PluginName: fakePluginName}
|
||||
fakeDeschedulePlugin := &fakeplugin.FakeDeschedulePlugin{PluginName: deschedulePluginName}
|
||||
fakeBalancePlugin := &fakeplugin.FakeBalancePlugin{PluginName: balancePluginName}
|
||||
fakeFilterPlugin := &fakeplugin.FakeFilterPlugin{PluginName: filterPluginName}
|
||||
|
||||
pluginregistry.Register(
|
||||
fakePluginName,
|
||||
fakeplugin.NewPluginFncFromFake(fakePlugin),
|
||||
&fakeplugin.FakePlugin{},
|
||||
&fakeplugin.FakePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
|
||||
pluginregistry.Register(
|
||||
deschedulePluginName,
|
||||
fakeplugin.NewFakeDeschedulePluginFncFromFake(fakeDeschedulePlugin),
|
||||
&fakeplugin.FakeDeschedulePlugin{},
|
||||
&fakeplugin.FakeDeschedulePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
|
||||
pluginregistry.Register(
|
||||
balancePluginName,
|
||||
fakeplugin.NewFakeBalancePluginFncFromFake(fakeBalancePlugin),
|
||||
&fakeplugin.FakeBalancePlugin{},
|
||||
&fakeplugin.FakeBalancePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
|
||||
pluginregistry.Register(
|
||||
filterPluginName,
|
||||
fakeplugin.NewFakeFilterPluginFncFromFake(fakeFilterPlugin),
|
||||
&fakeplugin.FakeFilterPlugin{},
|
||||
&fakeplugin.FakeFilterPluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
}
|
||||
|
||||
pluginregistry.Register(
|
||||
defaultevictor.PluginName,
|
||||
defaultevictor.New,
|
||||
&defaultevictor.DefaultEvictor{},
|
||||
&defaultevictor.DefaultEvictorArgs{},
|
||||
defaultevictor.ValidateDefaultEvictorArgs,
|
||||
defaultevictor.SetDefaults_DefaultEvictorArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(client, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Fatalf("build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
||||
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
|
||||
defer eventBroadcaster.Shutdown()
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, nodes, true, eventRecorder)
|
||||
|
||||
prfl, err := NewProfile(
|
||||
api.DeschedulerProfile{
|
||||
Name: "strategy-test-profile",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "FakePlugin_0",
|
||||
Args: &fakeplugin.FakePluginArgs{},
|
||||
},
|
||||
{
|
||||
Name: "FilterPlugin_0",
|
||||
Args: &fakeplugin.FakeFilterPluginArgs{},
|
||||
},
|
||||
{
|
||||
Name: "FilterPlugin_1",
|
||||
Args: &fakeplugin.FakeFilterPluginArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{"FakePlugin_0"},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{"FilterPlugin_1", "FilterPlugin_0"},
|
||||
},
|
||||
Evict: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
pluginregistry.PluginRegistry,
|
||||
WithClientSet(client),
|
||||
WithSharedInformerFactory(sharedInformerFactory),
|
||||
WithPodEvictor(podEvictor),
|
||||
WithGetPodsAssignedToNodeFnc(getPodsAssignedToNode),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create profile: %v", err)
|
||||
}
|
||||
|
||||
// Validate the extension points of all registered plugins are properly detected
|
||||
|
||||
diff := cmp.Diff(sets.NewString("DeschedulePlugin_0", "DeschedulePlugin_1", "DeschedulePlugin_2", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2"), prfl.deschedule)
|
||||
if diff != "" {
|
||||
t.Errorf("check for deschedule failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
diff = cmp.Diff(sets.NewString("BalancePlugin_0", "BalancePlugin_1", "BalancePlugin_2", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2"), prfl.balance)
|
||||
if diff != "" {
|
||||
t.Errorf("check for balance failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
diff = cmp.Diff(sets.NewString("DefaultEvictor", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2", "FilterPlugin_0", "FilterPlugin_1", "FilterPlugin_2"), prfl.filter)
|
||||
if diff != "" {
|
||||
t.Errorf("check for filter failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
diff = cmp.Diff(sets.NewString("DefaultEvictor", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2", "FilterPlugin_0", "FilterPlugin_1", "FilterPlugin_2"), prfl.preEvictionFilter)
|
||||
if diff != "" {
|
||||
t.Errorf("check for preEvictionFilter failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
// One deschedule ep enabled
|
||||
names := []string{}
|
||||
for _, pl := range prfl.deschedulePlugins {
|
||||
names = append(names, pl.Name())
|
||||
}
|
||||
sort.Strings(names)
|
||||
diff = cmp.Diff(sets.NewString("FakePlugin_0"), sets.NewString(names...))
|
||||
if diff != "" {
|
||||
t.Errorf("check for deschedule failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
// No balance ep enabled
|
||||
names = []string{}
|
||||
for _, pl := range prfl.balancePlugins {
|
||||
names = append(names, pl.Name())
|
||||
}
|
||||
sort.Strings(names)
|
||||
diff = cmp.Diff(sets.NewString(), sets.NewString(names...))
|
||||
if diff != "" {
|
||||
t.Errorf("check for balance failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
// Two filter eps enabled
|
||||
names = []string{}
|
||||
for _, pl := range prfl.filterPlugins {
|
||||
names = append(names, pl.Name())
|
||||
}
|
||||
sort.Strings(names)
|
||||
diff = cmp.Diff(sets.NewString("FilterPlugin_0", "FilterPlugin_1"), sets.NewString(names...))
|
||||
if diff != "" {
|
||||
t.Errorf("check for filter failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProfileExtensionPointOrdering(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
n1 := testutils.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := testutils.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
nodes := []*v1.Node{n1, n2}
|
||||
|
||||
p1 := testutils.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
|
||||
p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{}}
|
||||
|
||||
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
|
||||
|
||||
filterInvocationOrder := []string{}
|
||||
preEvictionFilterInvocationOrder := []string{}
|
||||
descheduleInvocationOrder := []string{}
|
||||
balanceInvocationOrder := []string{}
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
pluginName := fmt.Sprintf("Filter_%v", i)
|
||||
fakeFilterPlugin := &fakeplugin.FakeFilterPlugin{PluginName: pluginName}
|
||||
fakeFilterPlugin.AddReactor(string(frameworktypes.FilterExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
|
||||
if _, ok := action.(fakeplugin.FilterAction); ok {
|
||||
filterInvocationOrder = append(filterInvocationOrder, pluginName+"_filter")
|
||||
return true, true, nil
|
||||
}
|
||||
return false, false, nil
|
||||
})
|
||||
|
||||
fakeFilterPlugin.AddReactor(string(frameworktypes.PreEvictionFilterExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
|
||||
if _, ok := action.(fakeplugin.PreEvictionFilterAction); ok {
|
||||
preEvictionFilterInvocationOrder = append(preEvictionFilterInvocationOrder, pluginName+"_preEvictionFilter")
|
||||
return true, true, nil
|
||||
}
|
||||
return false, false, nil
|
||||
})
|
||||
|
||||
// plugin implementing Filter extension point
|
||||
pluginregistry.Register(
|
||||
pluginName,
|
||||
fakeplugin.NewFakeFilterPluginFncFromFake(fakeFilterPlugin),
|
||||
&fakeplugin.FakeFilterPlugin{},
|
||||
&fakeplugin.FakeFilterPluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
|
||||
fakePluginName := fmt.Sprintf("FakePlugin_%v", i)
|
||||
fakePlugin := fakeplugin.FakePlugin{}
|
||||
idx := i
|
||||
fakePlugin.AddReactor(string(frameworktypes.DescheduleExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
|
||||
descheduleInvocationOrder = append(descheduleInvocationOrder, fakePluginName)
|
||||
if idx == 0 {
|
||||
if dAction, ok := action.(fakeplugin.DescheduleAction); ok {
|
||||
// Invoke filters
|
||||
dAction.Handle().Evictor().Filter(p1)
|
||||
// Invoke pre-eviction filters
|
||||
dAction.Handle().Evictor().PreEvictionFilter(p1)
|
||||
return true, true, nil
|
||||
}
|
||||
return false, false, nil
|
||||
}
|
||||
return true, false, nil
|
||||
})
|
||||
|
||||
fakePlugin.AddReactor(string(frameworktypes.BalanceExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
|
||||
balanceInvocationOrder = append(balanceInvocationOrder, fakePluginName)
|
||||
return true, false, nil
|
||||
})
|
||||
|
||||
pluginregistry.Register(
|
||||
fakePluginName,
|
||||
fakeplugin.NewPluginFncFromFake(&fakePlugin),
|
||||
&fakeplugin.FakePlugin{},
|
||||
&fakeplugin.FakePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
}
|
||||
|
||||
pluginregistry.Register(
|
||||
defaultevictor.PluginName,
|
||||
defaultevictor.New,
|
||||
&defaultevictor.DefaultEvictor{},
|
||||
&defaultevictor.DefaultEvictorArgs{},
|
||||
defaultevictor.ValidateDefaultEvictorArgs,
defaultevictor.SetDefaults_DefaultEvictorArgs,
pluginregistry.PluginRegistry,
)

client := fakeclientset.NewSimpleClientset(n1, n2, p1)
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))

sharedInformerFactory := informers.NewSharedInformerFactory(client, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Fatalf("build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
defer eventBroadcaster.Shutdown()

podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, nil, nil, nodes, true, eventRecorder)

prfl, err := NewProfile(
api.DeschedulerProfile{
Name: "strategy-test-profile",
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
PriorityThreshold: &api.PriorityThreshold{
Value: nil,
},
},
},
{
Name: "FakePlugin_0",
Args: &fakeplugin.FakePluginArgs{},
},
{
Name: "FakePlugin_1",
Args: &fakeplugin.FakePluginArgs{},
},
{
Name: "FakePlugin_2",
Args: &fakeplugin.FakePluginArgs{},
},
{
Name: "Filter_0",
Args: &fakeplugin.FakeFilterPluginArgs{},
},
{
Name: "Filter_1",
Args: &fakeplugin.FakeFilterPluginArgs{},
},
{
Name: "Filter_2",
Args: &fakeplugin.FakeFilterPluginArgs{},
},
},
Plugins: api.Plugins{
Deschedule: api.PluginSet{
Enabled: []string{"FakePlugin_2", "FakePlugin_0", "FakePlugin_1"},
},
Balance: api.PluginSet{
Enabled: []string{"FakePlugin_1", "FakePlugin_0", "FakePlugin_2"},
},
Filter: api.PluginSet{
Enabled: []string{"Filter_2", "Filter_1", "Filter_0"},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{"Filter_2", "Filter_1", "Filter_0"},
},
Evict: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
},
pluginregistry.PluginRegistry,
WithClientSet(client),
WithSharedInformerFactory(sharedInformerFactory),
WithPodEvictor(podEvictor),
WithGetPodsAssignedToNodeFnc(getPodsAssignedToNode),
)
if err != nil {
t.Fatalf("unable to create profile: %v", err)
}

prfl.RunDeschedulePlugins(ctx, nodes)

diff := cmp.Diff([]string{"Filter_2_filter", "Filter_1_filter", "Filter_0_filter"}, filterInvocationOrder)
if diff != "" {
t.Errorf("check for filter invocation order failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}

diff = cmp.Diff([]string{"Filter_2_preEvictionFilter", "Filter_1_preEvictionFilter", "Filter_0_preEvictionFilter"}, preEvictionFilterInvocationOrder)
if diff != "" {
t.Errorf("check for filter invocation order failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}

diff = cmp.Diff([]string{"FakePlugin_2", "FakePlugin_0", "FakePlugin_1"}, descheduleInvocationOrder)
if diff != "" {
t.Errorf("check for deschedule invocation order failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}

prfl.RunBalancePlugins(ctx, nodes)

diff = cmp.Diff([]string{"FakePlugin_1", "FakePlugin_0", "FakePlugin_2"}, balanceInvocationOrder)
if diff != "" {
t.Errorf("check for balance invocation order failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}
}

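Editor's note on the ordering checks in the test above: a minimal, self-contained sketch of the same pattern follows, assuming only the go-cmp library. The names fakePlugin, enabled, and TestInvocationOrder are illustrative, not descheduler code. Plugins are invoked in the order they appear in the profile's Enabled list, the order is recorded, and cmp.Diff reports any mismatch.

package ordering_test

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

// fakePlugin stands in for a descheduler plugin; it only carries a name.
type fakePlugin struct {
	name string
}

func TestInvocationOrder(t *testing.T) {
	// The order the profile enables the plugins in is the order they must run in.
	enabled := []string{"FakePlugin_2", "FakePlugin_0", "FakePlugin_1"}

	registry := map[string]*fakePlugin{
		"FakePlugin_0": {name: "FakePlugin_0"},
		"FakePlugin_1": {name: "FakePlugin_1"},
		"FakePlugin_2": {name: "FakePlugin_2"},
	}

	var invocationOrder []string
	for _, name := range enabled {
		// "Running" a plugin here only records that it was invoked.
		invocationOrder = append(invocationOrder, registry[name].name)
	}

	if diff := cmp.Diff(enabled, invocationOrder); diff != "" {
		t.Errorf("unexpected invocation order (-want +got):\n%s", diff)
	}
}
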
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package framework
package types

import (
"context"
@@ -21,10 +21,10 @@ import (
"strings"
"testing"

"sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
@@ -197,7 +197,7 @@ func TestRemoveDuplicates(t *testing.T) {
ClientsetImpl: clientSet,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
SharedInformerFactoryImpl: sharedInformerFactory,
}

@@ -208,7 +208,7 @@ func TestRemoveDuplicates(t *testing.T) {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
t.Log("Running removeduplicates plugin")
plugin.(framework.BalancePlugin).Balance(ctx, workerNodes)
plugin.(frameworktypes.BalancePlugin).Balance(ctx, workerNodes)

waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)
actualEvictedPodCount := podEvictor.TotalEvicted()

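Editor's note: the plugin.(frameworktypes.BalancePlugin) expression in the hunk above is the ordinary Go type assertion used to select an extension point on a plugin instance. A minimal sketch follows, under assumed names (demoPlugin and the local interfaces are illustrative stand-ins, not the descheduler API in sigs.k8s.io/descheduler/pkg/framework/types).

package main

import "fmt"

// plugin is a stand-in for the common plugin interface.
type plugin interface {
	Name() string
}

// deschedulePlugin and balancePlugin stand in for the extension-point interfaces.
type deschedulePlugin interface {
	plugin
	Deschedule()
}

type balancePlugin interface {
	plugin
	Balance()
}

// demoPlugin implements only the balance extension point.
type demoPlugin struct{}

func (demoPlugin) Name() string { return "DemoPlugin" }
func (demoPlugin) Balance()     { fmt.Println("balance extension point invoked") }

func main() {
	var p plugin = demoPlugin{}

	// The comma-ok form probes for an interface without panicking.
	if d, ok := p.(deschedulePlugin); ok {
		d.Deschedule() // not reached: demoPlugin has no Deschedule method
	}
	if b, ok := p.(balancePlugin); ok {
		b.Balance() // reached: demoPlugin implements balancePlugin
	}
	// The single-value form, p.(balancePlugin).Balance(), panics if the value
	// does not implement the asserted interface, so the tests only use it
	// where the plugin's type is already known.
}
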
@@ -13,10 +13,10 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

var oneHourPodLifetimeSeconds uint = 3600
@@ -109,7 +109,7 @@ func TestFailedPods(t *testing.T) {
&frameworkfake.HandleImpl{
ClientsetImpl: clientSet,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
SharedInformerFactoryImpl: sharedInformerFactory,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
},
@@ -118,7 +118,7 @@ func TestFailedPods(t *testing.T) {
t.Fatalf("Unable to initialize the plugin: %v", err)
}

plugin.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, nodes)
t.Logf("Finished RemoveFailedPods strategy for %s", name)

if actualEvictedCount := podEvictor.TotalEvicted(); actualEvictedCount == tc.expectedEvictedCount {

@@ -49,11 +49,11 @@ import (
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils"
)

@@ -247,14 +247,14 @@ func runPodLifetimePlugin(
}, &frameworkfake.HandleImpl{
ClientsetImpl: clientset,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
})
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
t.Log("Running podlifetime plugin")
plugin.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, nodes)
}

func getPodNames(pods []v1.Pod) []string {
@@ -397,7 +397,7 @@ func TestLowNodeUtilization(t *testing.T) {
},
)

podFilter, err := podutil.NewOptions().WithFilter(evictorFilter.(framework.EvictorPlugin).Filter).BuildFilterFunc()
podFilter, err := podutil.NewOptions().WithFilter(evictorFilter.(frameworktypes.EvictorPlugin).Filter).BuildFilterFunc()
if err != nil {
t.Errorf("Error initializing pod filter function, %v", err)
}
@@ -413,7 +413,7 @@ func TestLowNodeUtilization(t *testing.T) {
ClientsetImpl: clientSet,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
SharedInformerFactoryImpl: sharedInformerFactory,
}

@@ -428,11 +428,11 @@ func TestLowNodeUtilization(t *testing.T) {
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
plugin.(framework.BalancePlugin).Balance(ctx, workerNodes)
plugin.(frameworktypes.BalancePlugin).Balance(ctx, workerNodes)

waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)

podFilter, err = podutil.NewOptions().WithFilter(evictorFilter.(framework.EvictorPlugin).Filter).BuildFilterFunc()
podFilter, err = podutil.NewOptions().WithFilter(evictorFilter.(frameworktypes.EvictorPlugin).Filter).BuildFilterFunc()
if err != nil {
t.Errorf("Error initializing pod filter function, %v", err)
}

@@ -34,10 +34,10 @@ import (
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

func TestTooManyRestarts(t *testing.T) {
@@ -197,7 +197,7 @@ func TestTooManyRestarts(t *testing.T) {
&frameworkfake.HandleImpl{
ClientsetImpl: clientSet,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
SharedInformerFactoryImpl: sharedInformerFactory,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
})
@@ -207,7 +207,7 @@ func TestTooManyRestarts(t *testing.T) {

// Run RemovePodsHavingTooManyRestarts strategy
t.Log("Running RemovePodsHavingTooManyRestarts strategy")
plugin.(framework.DeschedulePlugin).Deschedule(ctx, workerNodes)
plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, workerNodes)
t.Logf("Finished RemoveFailedPods strategy for %s", tc.name)

waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)

@@ -10,10 +10,10 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

const zoneTopologyKey string = "topology.kubernetes.io/zone"
@@ -108,7 +108,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
&frameworkfake.HandleImpl{
ClientsetImpl: clientSet,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: filter.(framework.EvictorPlugin),
EvictorFilterImpl: filter.(frameworktypes.EvictorPlugin),
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
},
)
@@ -116,7 +116,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
t.Fatalf("Unable to initialize the plugin: %v", err)
}

plugin.(framework.BalancePlugin).Balance(ctx, workerNodes)
plugin.(frameworktypes.BalancePlugin).Balance(ctx, workerNodes)
t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)

t.Logf("Wait for terminating pods of %s to disappear", name)

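Editor's note: a recurring pattern across the hunks above is frameworkfake.HandleImpl, a struct whose exported fields (ClientsetImpl, PodEvictorImpl, EvictorFilterImpl, ...) let tests satisfy the framework handle interface without wiring up a full descheduler. A minimal sketch of that fake-by-struct-fields approach follows; handle, fakeHandle, and runPlugin are hypothetical names, not the real descheduler types.

package main

import "fmt"

// handle is a stand-in for the interface a plugin receives at construction time.
type handle interface {
	Evictions() int
}

// fakeHandle satisfies handle through plain injectable fields, mirroring how
// the fake handle in the tests exposes its collaborators as *Impl fields.
type fakeHandle struct {
	EvictionsImpl int
}

func (h *fakeHandle) Evictions() int { return h.EvictionsImpl }

// runPlugin represents plugin code that only ever sees the interface.
func runPlugin(h handle) {
	fmt.Printf("plugin sees %d evictions\n", h.Evictions())
}

func main() {
	// A test constructs the fake with exactly the collaborators it cares about.
	runPlugin(&fakeHandle{EvictionsImpl: 2})
}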