diff --git a/pkg/apis/componentconfig/types_pluginargs.go b/pkg/apis/componentconfig/types_pluginargs.go index 021d1eb29..ddeb2ead3 100644 --- a/pkg/apis/componentconfig/types_pluginargs.go +++ b/pkg/apis/componentconfig/types_pluginargs.go @@ -78,3 +78,15 @@ type RemoveDuplicatesArgs struct { Namespaces *api.Namespaces ExcludeOwnerKinds []string } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodLifeTimeArgs holds arguments used to configure PodLifeTime plugin. +type PodLifeTimeArgs struct { + metav1.TypeMeta + + Namespaces *api.Namespaces + LabelSelector *metav1.LabelSelector + MaxPodLifeTimeSeconds *uint + States []string +} diff --git a/pkg/apis/componentconfig/validation/validation_pluginargs.go b/pkg/apis/componentconfig/validation/validation_pluginargs.go index 6d6721178..699a99599 100644 --- a/pkg/apis/componentconfig/validation/validation_pluginargs.go +++ b/pkg/apis/componentconfig/validation/validation_pluginargs.go @@ -19,8 +19,11 @@ package validation import ( "fmt" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/apis/componentconfig" ) @@ -64,6 +67,25 @@ func ValidateRemovePodsViolatingNodeAffinityArgs(args *componentconfig.RemovePod ) } +// ValidatePodLifeTimeArgs validates PodLifeTime arguments +func ValidatePodLifeTimeArgs(args *componentconfig.PodLifeTimeArgs) error { + var err error + if args.MaxPodLifeTimeSeconds == nil { + err = fmt.Errorf("MaxPodLifeTimeSeconds not set") + } + + return errorsAggregate( + err, + validateNamespaceArgs(args.Namespaces), + validateLabelSelectorArgs(args.LabelSelector), + validatePodLifeTimeStates(args.States), + ) +} + +func ValidateRemoveDuplicatesArgs(args *componentconfig.RemoveDuplicatesArgs) error { + return validateNamespaceArgs(args.Namespaces) +} + // 
errorsAggregate converts all arg validation errors to a single error interface. // if no errors, it will return nil. func errorsAggregate(errors ...error) error { @@ -96,9 +118,19 @@ func validatePodRestartThreshold(podRestartThreshold int32) error { return nil } -func ValidateRemoveDuplicatesArgs(args *componentconfig.RemoveDuplicatesArgs) error { - // At most one of include/exclude can be set - return errorsAggregate( - validateNamespaceArgs(args.Namespaces), +func validatePodLifeTimeStates(states []string) error { + podLifeTimeAllowedStates := sets.NewString( + string(v1.PodRunning), + string(v1.PodPending), + + // Container state reasons: https://github.com/kubernetes/kubernetes/blob/release-1.24/pkg/kubelet/kubelet_pods.go#L76-L79 + "PodInitializing", + "ContainerCreating", ) + + if !podLifeTimeAllowedStates.HasAll(states...) { + return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.List()) + } + + return nil } diff --git a/pkg/apis/componentconfig/validation/validation_pluginargs_test.go b/pkg/apis/componentconfig/validation/validation_pluginargs_test.go index 71931d58f..c0ca2ce02 100644 --- a/pkg/apis/componentconfig/validation/validation_pluginargs_test.go +++ b/pkg/apis/componentconfig/validation/validation_pluginargs_test.go @@ -17,10 +17,12 @@ limitations under the License. 
package validation import ( + "testing" + + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/apis/componentconfig" - "testing" ) func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) { @@ -124,3 +126,46 @@ func TestValidateRemovePodsViolatingNodeAffinityArgs(t *testing.T) { }) } } + +func TestValidateRemovePodLifeTimeArgs(t *testing.T) { + testCases := []struct { + description string + args *componentconfig.PodLifeTimeArgs + expectError bool + }{ + { + description: "valid arg, no errors", + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1), + States: []string{string(v1.PodRunning)}, + }, + expectError: false, + }, + { + description: "nil MaxPodLifeTimeSeconds arg, expects errors", + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: nil, + }, + expectError: true, + }, + { + description: "invalid pod state arg, expects errors", + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1), + States: []string{string(v1.PodUnknown)}, + }, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + err := ValidatePodLifeTimeArgs(tc.args) + + hasError := err != nil + if tc.expectError != hasError { + t.Error("unexpected arg validation behavior") + } + }) + } +} diff --git a/pkg/apis/componentconfig/zz_generated.deepcopy.go b/pkg/apis/componentconfig/zz_generated.deepcopy.go index 368e430c2..ec04c4963 100644 --- a/pkg/apis/componentconfig/zz_generated.deepcopy.go +++ b/pkg/apis/componentconfig/zz_generated.deepcopy.go @@ -54,6 +54,51 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodLifeTimeArgs) DeepCopyInto(out *PodLifeTimeArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = new(api.Namespaces) + (*in).DeepCopyInto(*out) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.MaxPodLifeTimeSeconds != nil { + in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds + *out = new(uint) + **out = **in + } + if in.States != nil { + in, out := &in.States, &out.States + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTimeArgs. +func (in *PodLifeTimeArgs) DeepCopy() *PodLifeTimeArgs { + if in == nil { + return nil + } + out := new(PodLifeTimeArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodLifeTimeArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RemoveDuplicatesArgs) DeepCopyInto(out *RemoveDuplicatesArgs) { *out = *in diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go index f520f9ff1..19e287592 100644 --- a/pkg/descheduler/descheduler.go +++ b/pkg/descheduler/descheduler.go @@ -19,6 +19,7 @@ package descheduler import ( "context" "fmt" + "k8s.io/klog/v2" v1 "k8s.io/api/core/v1" @@ -45,7 +46,6 @@ import ( "sigs.k8s.io/descheduler/pkg/descheduler/strategies" "sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization" "sigs.k8s.io/descheduler/pkg/framework" - "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints" "sigs.k8s.io/descheduler/pkg/utils" ) @@ -251,7 +251,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer "RemovePodsViolatingNodeAffinity": nil, "RemovePodsViolatingNodeTaints": nil, "RemovePodsHavingTooManyRestarts": nil, - "PodLifeTime": strategies.PodLifeTime, + "PodLifeTime": nil, "RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint, "RemoveFailedPods": nil, } @@ -369,7 +369,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer // TODO(jchaloup): once all strategies are migrated move this check under // the default evictor args validation if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" { - klog.V(1).ErrorS(fmt.Errorf("priority threshold misconfigured"), "only one of priorityThreshold fields can be set", "pluginName", removepodsviolatingnodetaints.PluginName) + klog.V(1).ErrorS(fmt.Errorf("priority threshold misconfigured"), "only one of priorityThreshold fields can be set", "pluginName", name) continue } thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, rs.Client, strategy.Params) diff --git a/pkg/descheduler/strategies/pod_lifetime.go b/pkg/descheduler/strategies/pod_lifetime.go deleted file mode 100644 index 3c84179c9..000000000 --- 
a/pkg/descheduler/strategies/pod_lifetime.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategies - -import ( - "context" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - - "sigs.k8s.io/descheduler/pkg/api" - "sigs.k8s.io/descheduler/pkg/descheduler/evictions" - podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" -) - -var ( - podLifeTimeAllowedStates = sets.NewString( - string(v1.PodRunning), - string(v1.PodPending), - - // Container state reason list: https://github.com/kubernetes/kubernetes/blob/release-1.24/pkg/kubelet/kubelet_pods.go#L76-L79 - "PodInitializing", - "ContainerCreating", - ) -) - -func validatePodLifeTimeParams(params *api.StrategyParameters) (sets.String, error) { - if params == nil || params.PodLifeTime == nil || params.PodLifeTime.MaxPodLifeTimeSeconds == nil { - return nil, fmt.Errorf("MaxPodLifeTimeSeconds not set") - } - - var states []string - if params.PodLifeTime.PodStatusPhases != nil { - states = append(states, params.PodLifeTime.PodStatusPhases...) - } - if params.PodLifeTime.States != nil { - states = append(states, params.PodLifeTime.States...) - } - if !podLifeTimeAllowedStates.HasAll(states...) 
{ - return nil, fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.List()) - } - - // At most one of include/exclude can be set - if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 { - return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set") - } - if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" { - return nil, fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set") - } - - return sets.NewString(states...), nil -} - -// PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago. -func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) { - states, err := validatePodLifeTimeParams(strategy.Params) - if err != nil { - klog.ErrorS(err, "Invalid PodLifeTime parameters") - return - } - - var includedNamespaces, excludedNamespaces sets.String - if strategy.Params.Namespaces != nil { - includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...) - excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...) - } - - filter := evictorFilter.Filter - if states.Len() > 0 { - filter = podutil.WrapFilterFuncs(func(pod *v1.Pod) bool { - if states.Has(string(pod.Status.Phase)) { - return true - } - - for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) { - return true - } - } - - return false - }, filter) - } - - podFilter, err := podutil.NewOptions(). - WithFilter(filter). - WithNamespaces(includedNamespaces). - WithoutNamespaces(excludedNamespaces). - WithLabelSelector(strategy.Params.LabelSelector). 
- BuildFilterFunc() - if err != nil { - klog.ErrorS(err, "Error initializing pod filter function") - return - } - - podsToEvict := make([]*v1.Pod, 0) - nodeMap := make(map[string]*v1.Node, len(nodes)) - - for _, node := range nodes { - nodeMap[node.Name] = node - klog.V(1).InfoS("Processing node", "node", klog.KObj(node)) - - pods := listOldPodsOnNode(node.Name, getPodsAssignedToNode, podFilter, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds) - podsToEvict = append(podsToEvict, pods...) - } - - // Should sort Pods so that the oldest can be evicted first - // in the event that PDB or settings such maxNoOfPodsToEvictPer* prevent too much eviction - podutil.SortPodsBasedOnAge(podsToEvict) - - nodeLimitExceeded := map[string]bool{} - for _, pod := range podsToEvict { - if nodeLimitExceeded[pod.Spec.NodeName] { - continue - } - if podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{}) { - klog.V(1).InfoS("Evicted pod because it exceeded its lifetime", "pod", klog.KObj(pod), "maxPodLifeTime", *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds) - } - if podEvictor.NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) { - nodeLimitExceeded[pod.Spec.NodeName] = true - } - } -} - -func listOldPodsOnNode( - nodeName string, - getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, - filter podutil.FilterFunc, - maxPodLifeTimeSeconds uint, -) []*v1.Pod { - pods, err := podutil.ListPodsOnANode(nodeName, getPodsAssignedToNode, filter) - if err != nil { - return nil - } - - var oldPods []*v1.Pod - for _, pod := range pods { - podAgeSeconds := uint(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds()) - if podAgeSeconds > maxPodLifeTimeSeconds { - oldPods = append(oldPods, pod) - } - } - - return oldPods -} diff --git a/pkg/descheduler/strategy_migration.go b/pkg/descheduler/strategy_migration.go index 2cb47c76a..ec50d9992 100644 --- a/pkg/descheduler/strategy_migration.go +++ b/pkg/descheduler/strategy_migration.go @@ -25,6 +25,7 @@ import ( 
"sigs.k8s.io/descheduler/pkg/apis/componentconfig" "sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation" "sigs.k8s.io/descheduler/pkg/framework" + "sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime" "sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates" "sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods" "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts" @@ -130,6 +131,40 @@ var pluginsMap = map[string]func(ctx context.Context, nodes []*v1.Node, params * klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodshavingtoomanyrestarts.PluginName) } }, + "PodLifeTime": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) { + podLifeTimeParams := params.PodLifeTime + if podLifeTimeParams == nil { + podLifeTimeParams = &api.PodLifeTime{} + } + + var states []string + if podLifeTimeParams.PodStatusPhases != nil { + states = append(states, podLifeTimeParams.PodStatusPhases...) + } + if podLifeTimeParams.States != nil { + states = append(states, podLifeTimeParams.States...) 
+ }
 + 
 + args := &componentconfig.PodLifeTimeArgs{
 + Namespaces: params.Namespaces,
 + LabelSelector: params.LabelSelector,
 + MaxPodLifeTimeSeconds: podLifeTimeParams.MaxPodLifeTimeSeconds,
 + States: states,
 + }
 + if err := validation.ValidatePodLifeTimeArgs(args); err != nil {
 + klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", podlifetime.PluginName)
 + return
 + }
 + pg, err := podlifetime.New(args, handle)
 + if err != nil {
 + klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", podlifetime.PluginName)
 + return
 + }
 + status := pg.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
 + if status != nil && status.Err != nil {
 + klog.V(1).ErrorS(status.Err, "plugin finished with error", "pluginName", podlifetime.PluginName)
 + }
 + }, "RemoveDuplicates": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) { args := &componentconfig.RemoveDuplicatesArgs{ Namespaces: params.Namespaces, diff --git a/pkg/framework/plugins/podlifetime/pod_lifetime.go b/pkg/framework/plugins/podlifetime/pod_lifetime.go new file mode 100644 index 000000000..a0ae2e639 --- /dev/null +++ b/pkg/framework/plugins/podlifetime/pod_lifetime.go @@ -0,0 +1,136 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podlifetime + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + "sigs.k8s.io/descheduler/pkg/apis/componentconfig" + "sigs.k8s.io/descheduler/pkg/framework" + + "sigs.k8s.io/descheduler/pkg/descheduler/evictions" + podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" +) + +const PluginName = "PodLifeTime" + +var ( + _ framework.Plugin = &PodLifeTime{} + _ framework.DeschedulePlugin = &PodLifeTime{} +) + +// PodLifeTime evicts pods on the node that violate the max pod lifetime threshold +type PodLifeTime struct { + handle framework.Handle + args *componentconfig.PodLifeTimeArgs + podFilter podutil.FilterFunc +} + +// New builds plugin from its arguments while passing a handle +func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) { + podLifeTimeArgs, ok := args.(*componentconfig.PodLifeTimeArgs) + if !ok { + return nil, fmt.Errorf("want args to be of type PodLifeTimeArgs, got %T", args) + } + + var includedNamespaces, excludedNamespaces sets.String + if podLifeTimeArgs.Namespaces != nil { + includedNamespaces = sets.NewString(podLifeTimeArgs.Namespaces.Include...) + excludedNamespaces = sets.NewString(podLifeTimeArgs.Namespaces.Exclude...) + } + + podFilter, err := podutil.NewOptions(). + WithFilter(handle.Evictor().Filter). + WithNamespaces(includedNamespaces). + WithoutNamespaces(excludedNamespaces). + WithLabelSelector(podLifeTimeArgs.LabelSelector). 
+ BuildFilterFunc() + if err != nil { + return nil, fmt.Errorf("error initializing pod filter function: %v", err) + } + + podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool { + podAgeSeconds := uint(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds()) + return podAgeSeconds > *podLifeTimeArgs.MaxPodLifeTimeSeconds + }) + + if len(podLifeTimeArgs.States) > 0 { + states := sets.NewString(podLifeTimeArgs.States...) + podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool { + if states.Has(string(pod.Status.Phase)) { + return true + } + + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) { + return true + } + } + + return false + }) + } + + return &PodLifeTime{ + handle: handle, + podFilter: podFilter, + args: podLifeTimeArgs, + }, nil +} + +// Name retrieves the plugin name +func (d *PodLifeTime) Name() string { + return PluginName +} + +// Deschedule extension point implementation for the plugin +func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status { + podsToEvict := make([]*v1.Pod, 0) + nodeMap := make(map[string]*v1.Node, len(nodes)) + + for _, node := range nodes { + klog.V(1).InfoS("Processing node", "node", klog.KObj(node)) + pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter) + if err != nil { + // no pods evicted as error encountered retrieving evictable Pods + return &framework.Status{ + Err: fmt.Errorf("error listing pods on a node: %v", err), + } + } + + nodeMap[node.Name] = node + podsToEvict = append(podsToEvict, pods...) 
+ } + + // Should sort Pods so that the oldest can be evicted first + // in the event that PDB or settings such maxNoOfPodsToEvictPer* prevent too much eviction + podutil.SortPodsBasedOnAge(podsToEvict) + + for _, pod := range podsToEvict { + if !d.handle.Evictor().NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) { + d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{}) + } + } + + return nil +} diff --git a/pkg/descheduler/strategies/pod_lifetime_test.go b/pkg/framework/plugins/podlifetime/pod_lifetime_test.go similarity index 77% rename from pkg/descheduler/strategies/pod_lifetime_test.go rename to pkg/framework/plugins/podlifetime/pod_lifetime_test.go index 8e19e934d..357783b20 100644 --- a/pkg/descheduler/strategies/pod_lifetime_test.go +++ b/pkg/framework/plugins/podlifetime/pod_lifetime_test.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package strategies +package podlifetime import ( "context" @@ -28,10 +28,11 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/events" - - "sigs.k8s.io/descheduler/pkg/api" + "sigs.k8s.io/descheduler/pkg/apis/componentconfig" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" + "sigs.k8s.io/descheduler/pkg/framework" + frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake" "sigs.k8s.io/descheduler/test" ) @@ -141,7 +142,7 @@ func TestPodLifeTime(t *testing.T) { var maxLifeTime uint = 600 testCases := []struct { description string - strategy api.DeschedulerStrategy + args *componentconfig.PodLifeTimeArgs pods []*v1.Pod nodes []*v1.Node expectedEvictedPodCount uint @@ -151,11 +152,8 @@ func TestPodLifeTime(t *testing.T) { }{ { description: "Two pods in the `dev` Namespace, 1 is new and 1 very is old. 1 should be evicted.", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, }, pods: []*v1.Pod{p1, p2}, nodes: []*v1.Node{node1}, @@ -163,11 +161,8 @@ func TestPodLifeTime(t *testing.T) { }, { description: "Two pods in the `dev` Namespace, 2 are new and 0 are old. 0 should be evicted.", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, }, pods: []*v1.Pod{p3, p4}, nodes: []*v1.Node{node1}, @@ -175,11 +170,8 @@ func TestPodLifeTime(t *testing.T) { }, { description: "Two pods in the `dev` Namespace, 1 created 605 seconds ago. 
1 should be evicted.", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, }, pods: []*v1.Pod{p5, p6}, nodes: []*v1.Node{node1}, @@ -187,11 +179,8 @@ func TestPodLifeTime(t *testing.T) { }, { description: "Two pods in the `dev` Namespace, 1 created 595 seconds ago. 0 should be evicted.", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, }, pods: []*v1.Pod{p7, p8}, nodes: []*v1.Node{node1}, @@ -199,14 +188,9 @@ func TestPodLifeTime(t *testing.T) { }, { description: "Two pods, one with ContainerCreating state. 1 should be evicted.", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{ - MaxPodLifeTimeSeconds: &maxLifeTime, - States: []string{"ContainerCreating"}, - }, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, + States: []string{"ContainerCreating"}, }, pods: []*v1.Pod{ p9, @@ -227,14 +211,9 @@ func TestPodLifeTime(t *testing.T) { }, { description: "Two pods, one with PodInitializing state. 1 should be evicted.", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{ - MaxPodLifeTimeSeconds: &maxLifeTime, - States: []string{"PodInitializing"}, - }, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, + States: []string{"PodInitializing"}, }, pods: []*v1.Pod{ p9, @@ -255,14 +234,9 @@ func TestPodLifeTime(t *testing.T) { }, { description: "Two old pods with different states. 
1 should be evicted.", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{ - MaxPodLifeTimeSeconds: &maxLifeTime, - States: []string{"Pending"}, - }, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, + States: []string{"Pending"}, }, pods: []*v1.Pod{p9, p10}, nodes: []*v1.Node{node1}, @@ -270,11 +244,8 @@ func TestPodLifeTime(t *testing.T) { }, { description: "does not evict pvc pods with ignorePvcPods set to true", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, }, pods: []*v1.Pod{p11}, nodes: []*v1.Node{node1}, @@ -283,11 +254,8 @@ func TestPodLifeTime(t *testing.T) { }, { description: "evicts pvc pods with ignorePvcPods set to false (or unset)", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, }, pods: []*v1.Pod{p11}, nodes: []*v1.Node{node1}, @@ -295,13 +263,10 @@ func TestPodLifeTime(t *testing.T) { }, { description: "No pod to evicted since all pod terminating", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"foo": "bar"}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, }, }, pods: []*v1.Pod{p12, p13}, @@ -310,13 +275,10 @@ func TestPodLifeTime(t *testing.T) { }, { description: "No pod should be evicted since pod terminating", - strategy: api.DeschedulerStrategy{ - 
Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"foo": "bar"}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, }, }, pods: []*v1.Pod{p14, p15}, @@ -325,11 +287,8 @@ func TestPodLifeTime(t *testing.T) { }, { description: "2 Oldest pods should be evicted when maxPodsToEvictPerNode and maxPodsToEvictPerNamespace are not set", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, }, pods: []*v1.Pod{p1, p2, p9}, nodes: []*v1.Node{node1}, @@ -339,11 +298,8 @@ func TestPodLifeTime(t *testing.T) { }, { description: "1 Oldest pod should be evicted when maxPodsToEvictPerNamespace is set to 1", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, }, pods: []*v1.Pod{p1, p2, p9}, nodes: []*v1.Node{node1}, @@ -352,11 +308,8 @@ func TestPodLifeTime(t *testing.T) { }, { description: "1 Oldest pod should be evicted when maxPodsToEvictPerNode is set to 1", - strategy: api.DeschedulerStrategy{ - Enabled: true, - Params: &api.StrategyParameters{ - PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, - }, + args: &componentconfig.PodLifeTimeArgs{ + MaxPodLifeTimeSeconds: &maxLifeTime, }, pods: []*v1.Pod{p1, p2, p9}, nodes: []*v1.Node{node1}, @@ -412,7 +365,17 @@ func TestPodLifeTime(t *testing.T) { false, ) - PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode) + plugin, err := 
New(tc.args, &frameworkfake.HandleImpl{ + ClientsetImpl: fakeClient, + PodEvictorImpl: podEvictor, + EvictorFilterImpl: evictorFilter, + GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode, + }) + if err != nil { + t.Fatalf("Unable to initialize the plugin: %v", err) + } + + plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes) podsEvicted := podEvictor.TotalEvicted() if podsEvicted != tc.expectedEvictedPodCount { t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 554eec6a9..af59b0bcb 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -39,6 +39,10 @@ import ( "k8s.io/client-go/tools/events" v1qos "k8s.io/kubectl/pkg/util/qos" "k8s.io/utils/pointer" + "sigs.k8s.io/descheduler/pkg/apis/componentconfig" + "sigs.k8s.io/descheduler/pkg/framework" + frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake" + "sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime" "sigs.k8s.io/descheduler/cmd/descheduler/app/options" deschedulerapi "sigs.k8s.io/descheduler/pkg/api" @@ -48,7 +52,6 @@ import ( eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" - "sigs.k8s.io/descheduler/pkg/descheduler/strategies" "sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization" "sigs.k8s.io/descheduler/pkg/utils" ) @@ -153,7 +156,7 @@ func waitForNodesReady(ctx context.Context, t *testing.T, clientSet clientset.In } } -func runPodLifetimeStrategy( +func runPodLifetimePlugin( ctx context.Context, t *testing.T, clientset clientset.Interface, @@ -166,7 +169,6 @@ func runPodLifetimeStrategy( labelSelector *metav1.LabelSelector, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, ) { - // Run descheduler. 
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset) if err != nil || len(evictionPolicyGroupVersion) == 0 { t.Fatalf("%v", err) @@ -177,51 +179,54 @@ func runPodLifetimeStrategy( t.Fatalf("%v", err) } - maxPodLifeTimeSeconds := uint(1) - strategy := deschedulerapi.DeschedulerStrategy{ - Enabled: true, - Params: &deschedulerapi.StrategyParameters{ - PodLifeTime: &deschedulerapi.PodLifeTime{MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds}, - Namespaces: namespaces, - ThresholdPriority: priority, - ThresholdPriorityClassName: priorityClass, - LabelSelector: labelSelector, - }, - } - - thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, clientset, strategy.Params) - if err != nil { - t.Fatalf("Failed to get threshold priority from strategy's params") - } - - eventRecorder := &events.FakeRecorder{} - - strategies.PodLifeTime( - ctx, + podEvictor := evictions.NewPodEvictor( clientset, - strategy, + evictionPolicyGroupVersion, + false, + nil, + maxPodsToEvictPerNamespace, nodes, - evictions.NewPodEvictor( - clientset, - evictionPolicyGroupVersion, - false, - nil, - maxPodsToEvictPerNamespace, - nodes, - false, - eventRecorder, - ), - evictions.NewEvictorFilter( - nodes, - getPodsAssignedToNode, - false, - evictCritical, - false, - false, - evictions.WithPriorityThreshold(thresholdPriority), - ), - getPodsAssignedToNode, + false, + &events.FakeRecorder{}, ) + + var thresholdPriority int32 + if priority != nil { + thresholdPriority = *priority + } else { + thresholdPriority, err = utils.GetPriorityFromPriorityClass(ctx, clientset, priorityClass) + if err != nil { + t.Fatalf("Failed to get threshold priority from plugin arg params") + } + } + + evictorFilter := evictions.NewEvictorFilter( + nodes, + getPodsAssignedToNode, + false, + evictCritical, + false, + false, + evictions.WithPriorityThreshold(thresholdPriority), + ) + + maxPodLifeTimeSeconds := uint(1) + + plugin, err := podlifetime.New(&componentconfig.PodLifeTimeArgs{ + 
MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds, + LabelSelector: labelSelector, + Namespaces: namespaces, + }, &frameworkfake.HandleImpl{ + ClientsetImpl: clientset, + PodEvictorImpl: podEvictor, + EvictorFilterImpl: evictorFilter, + GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode, + }) + if err != nil { + t.Fatalf("Unable to initialize the plugin: %v", err) + } + t.Log("Running podlifetime plugin") + plugin.(framework.DeschedulePlugin).Deschedule(ctx, nodes) } func getPodNames(pods []v1.Pod) []string { @@ -440,8 +445,8 @@ func TestNamespaceConstraintsInclude(t *testing.T) { sort.Strings(initialPodNames) t.Logf("Existing pods: %v", initialPodNames) - t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace) - runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{ + t.Logf("run the plugin to delete pods from %v namespace", rc.Namespace) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{ Include: []string{rc.Namespace}, }, "", nil, false, nil, nil, getPodsAssignedToNode) @@ -511,8 +516,8 @@ func TestNamespaceConstraintsExclude(t *testing.T) { sort.Strings(initialPodNames) t.Logf("Existing pods: %v", initialPodNames) - t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace) - runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{ + t.Logf("run the plugin to delete pods from namespaces except the %v namespace", rc.Namespace) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{ Exclude: []string{rc.Namespace}, }, "", nil, false, nil, nil, getPodsAssignedToNode) @@ -520,7 +525,7 @@ func TestNamespaceConstraintsExclude(t *testing.T) { time.Sleep(10 * time.Second) podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()}) if err != nil { - t.Fatalf("Unable to list pods after running strategy: %v", 
err) + t.Fatalf("Unable to list pods after running PodLifeTime plugin: %v", err) } excludePodNames := getPodNames(podList.Items) @@ -627,9 +632,9 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) { t.Logf("Existing pods: %v", initialPodNames) if isPriorityClass { - runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil, nil, getPodsAssignedToNode) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil, nil, getPodsAssignedToNode) } else { - runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil, nil, getPodsAssignedToNode) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil, nil, getPodsAssignedToNode) } // All pods are supposed to be deleted, wait until all pods in the test namespace are terminating @@ -745,11 +750,11 @@ func testPriority(t *testing.T, isPriorityClass bool) { t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames) if isPriorityClass { - t.Logf("set the strategy to delete pods with priority lower than priority class %s", highPriorityClass.Name) - runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil, nil, getPodsAssignedToNode) + t.Logf("run the plugin to delete pods with priority lower than priority class %s", highPriorityClass.Name) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil, nil, getPodsAssignedToNode) } else { - t.Logf("set the strategy to delete pods with priority lower than %d", highPriority) - runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil, nil, getPodsAssignedToNode) + t.Logf("run the plugin to delete pods with priority lower than %d", highPriority) + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil, nil, 
getPodsAssignedToNode) } t.Logf("Waiting 10s") @@ -758,7 +763,7 @@ func testPriority(t *testing.T, isPriorityClass bool) { podListHighPriority, err = clientSet.CoreV1().Pods(rcHighPriority.Namespace).List( ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcHighPriority.Spec.Template.Labels).String()}) if err != nil { - t.Fatalf("Unable to list pods after running strategy: %v", err) + t.Fatalf("Unable to list pods after running plugin: %v", err) } excludePodNames := getPodNames(podListHighPriority.Items) @@ -852,8 +857,8 @@ func TestPodLabelSelector(t *testing.T) { sort.Strings(expectEvictPodNames) t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames) - t.Logf("set the strategy to delete pods with label test:podlifetime-evict") - runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode) + t.Logf("run the plugin to delete pods with label test:podlifetime-evict") + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode) t.Logf("Waiting 10s") time.Sleep(10 * time.Second) @@ -861,7 +866,7 @@ func TestPodLabelSelector(t *testing.T) { podListReserve, err = clientSet.CoreV1().Pods(rcReserve.Namespace).List( ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcReserve.Spec.Template.Labels).String()}) if err != nil { - t.Fatalf("Unable to list pods after running strategy: %v", err) + t.Fatalf("Unable to list pods after running plugin: %v", err) } reservedPodNames := getPodNames(podListReserve.Items) @@ -952,13 +957,13 @@ func TestEvictAnnotation(t *testing.T) { sort.Strings(initialPodNames) t.Logf("Existing pods: %v", initialPodNames) - t.Log("Running PodLifetime strategy") - runPodLifetimeStrategy(ctx, t, clientSet, 
nodeInformer, nil, "", nil, false, nil, nil, getPodsAssignedToNode) + t.Log("Running PodLifetime plugin") + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil, nil, getPodsAssignedToNode) if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()}) if err != nil { - return false, fmt.Errorf("Unable to list pods after running strategy: %v", err) + return false, fmt.Errorf("unable to list pods after running plugin: %v", err) } excludePodNames := getPodNames(podList.Items) @@ -1017,10 +1022,10 @@ func TestPodLifeTimeOldestEvicted(t *testing.T) { t.Fatalf("Unable to list pods: %v", err) } - t.Log("Running PodLifetime strategy with maxPodsToEvictPerNamespace=1 to ensure only the oldest pod is evicted") + t.Log("Running PodLifetime plugin with maxPodsToEvictPerNamespace=1 to ensure only the oldest pod is evicted") var maxPodsToEvictPerNamespace uint = 1 - runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &maxPodsToEvictPerNamespace, nil, getPodsAssignedToNode) - t.Log("Finished PodLifetime strategy") + runPodLifetimePlugin(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &maxPodsToEvictPerNamespace, nil, getPodsAssignedToNode) + t.Log("Finished PodLifetime plugin") t.Logf("Wait for terminating pod to disappear") waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)