Mirror of https://github.com/kubernetes-sigs/descheduler.git
Merge pull request #910 from JaneLiuL/master
bring lownodeutilization and highnodeutilization to plugin
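With this change, LowNodeUtilization and HighNodeUtilization are built and run through the plugin framework (typed args, a constructor, and a Balance extension point) instead of the legacy strategy functions. A minimal sketch of the new flow, based on the APIs added in this diff; the surrounding function, the provided framework.Handle, the context, and the node list are assumed to come from the caller and are illustrative only:

// runLowNodeUtilization is a hypothetical helper showing how the new plugin is
// constructed and invoked. componentconfig, validation, nodeutilization,
// framework, api, v1 and klog are the packages touched by this PR.
func runLowNodeUtilization(ctx context.Context, handle framework.Handle, nodes []*v1.Node) {
    // Thresholds are percentages of node allocatable capacity.
    args := &componentconfig.LowNodeUtilizationArgs{
        Thresholds:       api.ResourceThresholds{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
        TargetThresholds: api.ResourceThresholds{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
    }
    if err := validation.ValidateLowNodeUtilizationArgs(args); err != nil {
        klog.ErrorS(err, "invalid LowNodeUtilization arguments")
        return
    }
    plugin, err := nodeutilization.NewLowNodeUtilization(args, handle)
    if err != nil {
        klog.ErrorS(err, "unable to initialize LowNodeUtilization")
        return
    }
    // Balance evicts pods from underutilized/overutilized nodes according to the thresholds.
    if status := plugin.(framework.BalancePlugin).Balance(ctx, nodes); status != nil && status.Err != nil {
        klog.ErrorS(status.Err, "LowNodeUtilization finished with error")
    }
}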
@@ -111,3 +111,23 @@ type RemovePodsViolatingInterPodAntiAffinityArgs struct {
    Namespaces    *api.Namespaces
    LabelSelector *metav1.LabelSelector
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

type LowNodeUtilizationArgs struct {
    metav1.TypeMeta

    UseDeviationThresholds bool
    Thresholds             api.ResourceThresholds
    TargetThresholds       api.ResourceThresholds
    NumberOfNodes          int
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

type HighNodeUtilizationArgs struct {
    metav1.TypeMeta

    Thresholds    api.ResourceThresholds
    NumberOfNodes int
}
@@ -28,6 +28,13 @@ import (
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)

const (
    // MinResourcePercentage is the minimum value of a resource's percentage
    MinResourcePercentage = 0
    // MaxResourcePercentage is the maximum value of a resource's percentage
    MaxResourcePercentage = 100
)

// ValidateRemovePodsViolatingNodeTaintsArgs validates RemovePodsViolatingNodeTaints arguments
func ValidateRemovePodsViolatingNodeTaintsArgs(args *componentconfig.RemovePodsViolatingNodeTaintsArgs) error {
    return errorsAggregate(
@@ -150,3 +157,47 @@ func validatePodLifeTimeStates(states []string) error {
    return nil
}

func ValidateHighNodeUtilizationArgs(args *componentconfig.HighNodeUtilizationArgs) error {
    return validateThresholds(args.Thresholds)
}

func ValidateLowNodeUtilizationArgs(args *componentconfig.LowNodeUtilizationArgs) error {
    return validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, args.UseDeviationThresholds)
}

func validateLowNodeUtilizationThresholds(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) error {
    // validate thresholds and targetThresholds config
    if err := validateThresholds(thresholds); err != nil {
        return fmt.Errorf("thresholds config is not valid: %v", err)
    }
    if err := validateThresholds(targetThresholds); err != nil {
        return fmt.Errorf("targetThresholds config is not valid: %v", err)
    }

    // validate if thresholds and targetThresholds have same resources configured
    if len(thresholds) != len(targetThresholds) {
        return fmt.Errorf("thresholds and targetThresholds configured different resources")
    }
    for resourceName, value := range thresholds {
        if targetValue, ok := targetThresholds[resourceName]; !ok {
            return fmt.Errorf("thresholds and targetThresholds configured different resources")
        } else if value > targetValue && !useDeviationThresholds {
            return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
        }
    }
    return nil
}

// validateThresholds checks if thresholds have valid resource name and resource percentage configured
func validateThresholds(thresholds api.ResourceThresholds) error {
    if len(thresholds) == 0 {
        return fmt.Errorf("no resource threshold is configured")
    }
    for name, percent := range thresholds {
        if percent < MinResourcePercentage || percent > MaxResourcePercentage {
            return fmt.Errorf("%v threshold not in [%v, %v] range", name, MinResourcePercentage, MaxResourcePercentage)
        }
    }
    return nil
}
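For reference, a small sketch of how the new validator behaves with hypothetical threshold values (the packages are the ones added above); CPU's threshold (90) exceeds its targetThreshold (80), so the call fails:

// Both maps must cover the same resources and stay within [0, 100].
args := &componentconfig.LowNodeUtilizationArgs{
    Thresholds:       api.ResourceThresholds{v1.ResourceCPU: 90, v1.ResourceMemory: 20},
    TargetThresholds: api.ResourceThresholds{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
}
if err := validation.ValidateLowNodeUtilizationArgs(args); err != nil {
    fmt.Println(err) // thresholds' cpu percentage is greater than targetThresholds'
}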
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package validation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -168,3 +169,163 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
|
||||
var extendedResource = v1.ResourceName("example.com/foo")
|
||||
tests := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
targetThresholds api.ResourceThresholds
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing invalid thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
|
||||
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different num of resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' CPU config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 90,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
|
||||
},
|
||||
{
|
||||
name: "only thresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "only targetThresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different extended resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
"example.com/bar": 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' extended resource config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 90,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 20,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource),
|
||||
},
|
||||
{
|
||||
name: "passing valid plugin config",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing valid plugin config with extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
args := &componentconfig.LowNodeUtilizationArgs{
|
||||
|
||||
Thresholds: testCase.thresholds,
|
||||
TargetThresholds: testCase.targetThresholds,
|
||||
}
|
||||
validateErr := validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, false)
|
||||
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,6 +54,77 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HighNodeUtilizationArgs) DeepCopyInto(out *HighNodeUtilizationArgs) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    if in.Thresholds != nil {
        in, out := &in.Thresholds, &out.Thresholds
        *out = make(api.ResourceThresholds, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighNodeUtilizationArgs.
func (in *HighNodeUtilizationArgs) DeepCopy() *HighNodeUtilizationArgs {
    if in == nil {
        return nil
    }
    out := new(HighNodeUtilizationArgs)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HighNodeUtilizationArgs) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LowNodeUtilizationArgs) DeepCopyInto(out *LowNodeUtilizationArgs) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    if in.Thresholds != nil {
        in, out := &in.Thresholds, &out.Thresholds
        *out = make(api.ResourceThresholds, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.TargetThresholds != nil {
        in, out := &in.TargetThresholds, &out.TargetThresholds
        *out = make(api.ResourceThresholds, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LowNodeUtilizationArgs.
func (in *LowNodeUtilizationArgs) DeepCopy() *LowNodeUtilizationArgs {
    if in == nil {
        return nil
    }
    out := new(LowNodeUtilizationArgs)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LowNodeUtilizationArgs) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLifeTimeArgs) DeepCopyInto(out *PodLifeTimeArgs) {
    *out = *in
@@ -43,7 +43,6 @@ import (
    eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
    nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    "sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
    "sigs.k8s.io/descheduler/pkg/framework"
    "sigs.k8s.io/descheduler/pkg/utils"
)
@@ -244,8 +243,8 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer

    strategyFuncs := map[api.StrategyName]strategyFunction{
        "RemoveDuplicates":    nil,
        "LowNodeUtilization":  nodeutilization.LowNodeUtilization,
        "HighNodeUtilization": nodeutilization.HighNodeUtilization,
        "LowNodeUtilization":  nil,
        "HighNodeUtilization": nil,
        "RemovePodsViolatingInterPodAntiAffinity": nil,
        "RemovePodsViolatingNodeAffinity":         nil,
        "RemovePodsViolatingNodeTaints":           nil,
@@ -25,6 +25,7 @@ import (
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation"
    "sigs.k8s.io/descheduler/pkg/framework"
    "sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
    "sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
    "sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
    "sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
@@ -227,4 +228,46 @@ var pluginsMap = map[string]func(ctx context.Context, nodes []*v1.Node, params *
            klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
        }
    },
    "HighNodeUtilization": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
        args := &componentconfig.HighNodeUtilizationArgs{
            Thresholds:    params.NodeResourceUtilizationThresholds.Thresholds,
            NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
        }

        if err := validation.ValidateHighNodeUtilizationArgs(args); err != nil {
            klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
            return
        }
        pg, err := nodeutilization.NewHighNodeUtilization(args, handle)
        if err != nil {
            klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
            return
        }
        status := pg.(framework.BalancePlugin).Balance(ctx, nodes)
        if status != nil && status.Err != nil {
            klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
        }
    },
    "LowNodeUtilization": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
        args := &componentconfig.LowNodeUtilizationArgs{
            Thresholds:             params.NodeResourceUtilizationThresholds.Thresholds,
            TargetThresholds:       params.NodeResourceUtilizationThresholds.TargetThresholds,
            UseDeviationThresholds: params.NodeResourceUtilizationThresholds.UseDeviationThresholds,
            NumberOfNodes:          params.NodeResourceUtilizationThresholds.NumberOfNodes,
        }

        if err := validation.ValidateLowNodeUtilizationArgs(args); err != nil {
            klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
            return
        }
        pg, err := nodeutilization.NewLowNodeUtilization(args, handle)
        if err != nil {
            klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
            return
        }
        status := pg.(framework.BalancePlugin).Balance(ctx, nodes)
        if status != nil && status.Err != nil {
            klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
        }
    },
}
@@ -1,11 +1,11 @@
/*
Copyright 2021 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0
    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,40 +19,69 @@ package nodeutilization
import (
    "context"
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/klog/v2"

    "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
    nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"

    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    "sigs.k8s.io/descheduler/pkg/framework"
)

// HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its strategy.
const HighNodeUtilizationPluginName = "HighNodeUtilization"

// HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its plugin.
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
    if err := validateNodeUtilizationParams(strategy.Params); err != nil {
        klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
        return

type HighNodeUtilization struct {
    handle    framework.Handle
    args      *componentconfig.HighNodeUtilizationArgs
    podFilter func(pod *v1.Pod) bool
}

var _ framework.Plugin = &HighNodeUtilization{}
var _ framework.BalancePlugin = &HighNodeUtilization{}

// NewHighNodeUtilization builds plugin from its arguments while passing a handle
func NewHighNodeUtilization(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
    highNodeUtilizatioArgs, ok := args.(*componentconfig.HighNodeUtilizationArgs)
    if !ok {
        return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
    }

    thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
    targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
    if err := validateHighUtilizationStrategyConfig(thresholds, targetThresholds); err != nil {
        klog.ErrorS(err, "HighNodeUtilization config is not valid")
        return
    podFilter, err := podutil.NewOptions().
        WithFilter(handle.Evictor().Filter).
        BuildFilterFunc()
    if err != nil {
        return nil, fmt.Errorf("error initializing pod filter function: %v", err)
    }
    targetThresholds = make(api.ResourceThresholds)

    return &HighNodeUtilization{
        handle:    handle,
        args:      highNodeUtilizatioArgs,
        podFilter: podFilter,
    }, nil
}

// Name retrieves the plugin name
func (h *HighNodeUtilization) Name() string {
    return HighNodeUtilizationPluginName
}

// Balance extension point implementation for the plugin
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
    thresholds := h.args.Thresholds
    targetThresholds := make(api.ResourceThresholds)

    setDefaultForThresholds(thresholds, targetThresholds)
    resourceNames := getResourceNames(targetThresholds)

    sourceNodes, highNodes := classifyNodes(
        getNodeUsage(nodes, resourceNames, getPodsAssignedToNode),
        getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode, false),
        getNodeUsage(nodes, resourceNames, h.handle.GetPodsAssignedToNodeFunc()),
        getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, h.handle.GetPodsAssignedToNodeFunc(), false),
        func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
            return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
        },
@@ -81,19 +110,19 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate

    if len(sourceNodes) == 0 {
        klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
        return
        return nil
    }
    if len(sourceNodes) <= strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
        klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
        return
    if len(sourceNodes) <= h.args.NumberOfNodes {
        klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", h.args.NumberOfNodes)
        return nil
    }
    if len(sourceNodes) == len(nodes) {
        klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
        return
        return nil
    }
    if len(highNodes) == 0 {
        klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
        return
        return nil
    }

    // stop if the total available usage has dropped to zero - no more pods can be scheduled
@@ -114,21 +143,11 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
        ctx,
        sourceNodes,
        highNodes,
        podEvictor,
        evictorFilter.Filter,
        h.handle.Evictor(),
        h.podFilter,
        resourceNames,
        "HighNodeUtilization",
        continueEvictionCond)

}

func validateHighUtilizationStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error {
    if targetThresholds != nil {
        return fmt.Errorf("targetThresholds is not applicable for HighNodeUtilization")
    }
    if err := validateThresholds(thresholds); err != nil {
        return fmt.Errorf("thresholds config is not valid: %v", err)
    }
    return nil
}
@@ -31,8 +31,11 @@ import (
|
||||
"k8s.io/client-go/tools/events"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
@@ -478,29 +481,6 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
//fakeClient := &fake.Clientset{}
|
||||
//fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
// list := action.(core.ListAction)
|
||||
// fieldString := list.GetListRestrictions().Fields.String()
|
||||
// if strings.Contains(fieldString, n1NodeName) {
|
||||
// return true, test.pods[n1NodeName], nil
|
||||
// }
|
||||
// if strings.Contains(fieldString, n2NodeName) {
|
||||
// return true, test.pods[n2NodeName], nil
|
||||
// }
|
||||
// if strings.Contains(fieldString, n3NodeName) {
|
||||
// return true, test.pods[n3NodeName], nil
|
||||
// }
|
||||
// return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||
//})
|
||||
//fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
// getAction := action.(core.GetAction)
|
||||
// if node, exists := testCase.nodes[getAction.GetName()]; exists {
|
||||
// return true, node, nil
|
||||
// }
|
||||
// return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
||||
//})
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
@@ -514,27 +494,30 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
|
||||
Thresholds: testCase.thresholds,
|
||||
},
|
||||
NodeFit: true,
|
||||
},
|
||||
handle := &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictions.NewEvictorFilter(
|
||||
testCase.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
evictions.WithNodeFit(true),
|
||||
),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
evictorFilter := evictions.NewEvictorFilter(
|
||||
testCase.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
evictions.WithNodeFit(strategy.Params.NodeFit),
|
||||
)
|
||||
|
||||
HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
|
||||
plugin, err := NewHighNodeUtilization(&componentconfig.HighNodeUtilizationArgs{
|
||||
Thresholds: testCase.thresholds,
|
||||
},
|
||||
handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, testCase.nodes)
|
||||
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if testCase.expectedPodsEvicted != podsEvicted {
|
||||
@@ -547,85 +530,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateHighNodeUtilizationStrategyConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
targetThresholds api.ResourceThresholds
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing target thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("targetThresholds is not applicable for HighNodeUtilization"),
|
||||
},
|
||||
{
|
||||
name: "passing empty thresholds",
|
||||
thresholds: api.ResourceThresholds{},
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: no resource threshold is configured"),
|
||||
},
|
||||
{
|
||||
name: "passing invalid thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
|
||||
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
|
||||
},
|
||||
{
|
||||
name: "passing valid strategy config",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing valid strategy config with extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
validateErr := validateHighUtilizationStrategyConfig(testCase.thresholds, testCase.targetThresholds)
|
||||
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHighNodeUtilizationWithTaints(t *testing.T) {
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
n1 := test.BuildTestNode("n1", 1000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 1000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 1000, 3000, 10, nil)
|
||||
@@ -729,16 +634,31 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
evictorFilter := evictions.NewEvictorFilter(
|
||||
item.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
handle := &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictions.NewEvictorFilter(
|
||||
item.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
|
||||
plugin, err := NewHighNodeUtilization(&componentconfig.HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
},
|
||||
},
|
||||
handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, item.nodes)
|
||||
|
||||
if item.evictionsExpected != podEvictor.TotalEvicted() {
|
||||
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
|
||||
@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -19,35 +19,62 @@ package nodeutilization
import (
    "context"
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/klog/v2"

    "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
    nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    "sigs.k8s.io/descheduler/pkg/framework"
)

const LowNodeUtilizationPluginName = "LowNodeUtilization"

// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
// to calculate nodes' utilization and not the actual resource usage.
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
    // TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
    if err := validateNodeUtilizationParams(strategy.Params); err != nil {
        klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
        return

type LowNodeUtilization struct {
    handle    framework.Handle
    args      *componentconfig.LowNodeUtilizationArgs
    podFilter func(pod *v1.Pod) bool
}

var _ framework.Plugin = &LowNodeUtilization{}
var _ framework.BalancePlugin = &LowNodeUtilization{}

// NewLowNodeUtilization builds plugin from its arguments while passing a handle
func NewLowNodeUtilization(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
    lowNodeUtilizationArgsArgs, ok := args.(*componentconfig.LowNodeUtilizationArgs)
    if !ok {
        return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
    }

    useDeviationThresholds := strategy.Params.NodeResourceUtilizationThresholds.UseDeviationThresholds
    thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
    targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
    if err := validateLowUtilizationStrategyConfig(thresholds, targetThresholds, useDeviationThresholds); err != nil {
        klog.ErrorS(err, "LowNodeUtilization config is not valid")
        return
    podFilter, err := podutil.NewOptions().
        WithFilter(handle.Evictor().Filter).
        BuildFilterFunc()
    if err != nil {
        return nil, fmt.Errorf("error initializing pod filter function: %v", err)
    }

    return &LowNodeUtilization{
        handle:    handle,
        args:      lowNodeUtilizationArgsArgs,
        podFilter: podFilter,
    }, nil
}

// Name retrieves the plugin name
func (l *LowNodeUtilization) Name() string {
    return LowNodeUtilizationPluginName
}

// Balance extension point implementation for the plugin
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
    useDeviationThresholds := l.args.UseDeviationThresholds
    thresholds := l.args.Thresholds
    targetThresholds := l.args.TargetThresholds

    // check if Pods/CPU/Mem are set, if not, set them to 100
    if _, ok := thresholds[v1.ResourcePods]; !ok {
        if useDeviationThresholds {
@@ -79,8 +106,8 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
    resourceNames := getResourceNames(thresholds)

    lowNodes, sourceNodes := classifyNodes(
        getNodeUsage(nodes, resourceNames, getPodsAssignedToNode),
        getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode, useDeviationThresholds),
        getNodeUsage(nodes, resourceNames, l.handle.GetPodsAssignedToNodeFunc()),
        getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, l.handle.GetPodsAssignedToNodeFunc(), useDeviationThresholds),
        // The node has to be schedulable (to be able to move workload there)
        func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
            if nodeutil.IsNodeUnschedulable(node) {
@@ -124,22 +151,22 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg

    if len(lowNodes) == 0 {
        klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
        return
        return nil
    }

    if len(lowNodes) <= strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
        klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
        return
    if len(lowNodes) <= l.args.NumberOfNodes {
        klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", l.args.NumberOfNodes)
        return nil
    }

    if len(lowNodes) == len(nodes) {
        klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
        return
        return nil
    }

    if len(sourceNodes) == 0 {
        klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
        return
        return nil
    }

    // stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
@@ -163,35 +190,10 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
        ctx,
        sourceNodes,
        lowNodes,
        podEvictor,
        evictorFilter.Filter,
        l.handle.Evictor(),
        l.podFilter,
        resourceNames,
        "LowNodeUtilization",
        continueEvictionCond)

    klog.V(1).InfoS("Total number of pods evicted", "evictedPods", podEvictor.TotalEvicted())
}

// validateLowUtilizationStrategyConfig checks if the strategy's config is valid
func validateLowUtilizationStrategyConfig(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) error {
    // validate thresholds and targetThresholds config
    if err := validateThresholds(thresholds); err != nil {
        return fmt.Errorf("thresholds config is not valid: %v", err)
    }
    if err := validateThresholds(targetThresholds); err != nil {
        return fmt.Errorf("targetThresholds config is not valid: %v", err)
    }

    // validate if thresholds and targetThresholds have same resources configured
    if len(thresholds) != len(targetThresholds) {
        return fmt.Errorf("thresholds and targetThresholds configured different resources")
    }
    for resourceName, value := range thresholds {
        if targetValue, ok := targetThresholds[resourceName]; !ok {
            return fmt.Errorf("thresholds and targetThresholds configured different resources")
        } else if value > targetValue && !useDeviationThresholds {
            return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
        }
    }
    return nil
}
@@ -19,6 +19,10 @@ package nodeutilization
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -31,7 +35,6 @@ import (
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/events"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
@@ -720,28 +723,6 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
//fakeClient := &fake.Clientset{}
|
||||
//fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
// list := action.(core.ListAction)
|
||||
// fieldString := list.GetListRestrictions().Fields.String()
|
||||
// if strings.Contains(fieldString, n1NodeName) {
|
||||
// return true, test.pods[n1NodeName], nil
|
||||
// }
|
||||
// if strings.Contains(fieldString, n2NodeName) {
|
||||
// return true, test.pods[n2NodeName], nil
|
||||
// }
|
||||
// if strings.Contains(fieldString, n3NodeName) {
|
||||
// return true, test.pods[n3NodeName], nil
|
||||
// }
|
||||
// return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||
//})
|
||||
//fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
// getAction := action.(core.GetAction)
|
||||
// if node, exists := test.nodes[getAction.GetName()]; exists {
|
||||
// return true, node, nil
|
||||
// }
|
||||
// return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
||||
//})
|
||||
podsForEviction := make(map[string]struct{})
|
||||
for _, pod := range test.evictedPods {
|
||||
podsForEviction[pod] = struct{}{}
|
||||
@@ -779,29 +760,33 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
|
||||
Thresholds: test.thresholds,
|
||||
TargetThresholds: test.targetThresholds,
|
||||
UseDeviationThresholds: test.useDeviationThresholds,
|
||||
},
|
||||
NodeFit: true,
|
||||
},
|
||||
handle := &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictions.NewEvictorFilter(
|
||||
test.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
evictions.WithNodeFit(true),
|
||||
),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
evictorFilter := evictions.NewEvictorFilter(
|
||||
test.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
evictions.WithNodeFit(strategy.Params.NodeFit),
|
||||
)
|
||||
plugin, err := NewLowNodeUtilization(&componentconfig.LowNodeUtilizationArgs{
|
||||
|
||||
LowNodeUtilization(ctx, fakeClient, strategy, test.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
|
||||
Thresholds: test.thresholds,
|
||||
TargetThresholds: test.targetThresholds,
|
||||
UseDeviationThresholds: test.useDeviationThresholds,
|
||||
},
|
||||
handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, test.nodes)
|
||||
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if test.expectedPodsEvicted != podsEvicted {
|
||||
@@ -814,176 +799,8 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateLowNodeUtilizationStrategyConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
targetThresholds api.ResourceThresholds
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing invalid thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
|
||||
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different num of resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' CPU config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 90,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
|
||||
},
|
||||
{
|
||||
name: "only thresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "only targetThresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different extended resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
"example.com/bar": 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' extended resource config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 90,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 20,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource),
|
||||
},
|
||||
{
|
||||
name: "passing valid strategy config",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing valid strategy config with extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
validateErr := validateLowUtilizationStrategyConfig(testCase.thresholds, testCase.targetThresholds, false)
|
||||
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 70,
|
||||
},
|
||||
},
|
||||
NodeFit: true,
|
||||
},
|
||||
}
|
||||
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 1000, 3000, 10, nil)
|
||||
@@ -1103,16 +920,36 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
evictorFilter := evictions.NewEvictorFilter(
|
||||
item.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
handle := &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictions.NewEvictorFilter(
|
||||
item.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
evictions.WithNodeFit(true),
|
||||
),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
|
||||
plugin, err := NewLowNodeUtilization(&componentconfig.LowNodeUtilizationArgs{
|
||||
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 70,
|
||||
},
|
||||
},
|
||||
handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
plugin.(framework.BalancePlugin).Balance(ctx, item.nodes)
|
||||
|
||||
if item.evictionsExpected != podEvictor.TotalEvicted() {
|
||||
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
|
||||
@@ -18,18 +18,17 @@ package nodeutilization

import (
    "context"
    "fmt"
    "sigs.k8s.io/descheduler/pkg/api"
    "sort"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/klog/v2"

    "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
    "sigs.k8s.io/descheduler/pkg/descheduler/node"
    nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    "sigs.k8s.io/descheduler/pkg/framework"
    "sigs.k8s.io/descheduler/pkg/utils"
)

@@ -62,30 +61,6 @@ const (
    MaxResourcePercentage = 100
)

func validateNodeUtilizationParams(params *api.StrategyParameters) error {
    if params == nil || params.NodeResourceUtilizationThresholds == nil {
        return fmt.Errorf("NodeResourceUtilizationThresholds not set")
    }
    if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
        return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
    }

    return nil
}

// validateThresholds checks if thresholds have valid resource name and resource percentage configured
func validateThresholds(thresholds api.ResourceThresholds) error {
    if len(thresholds) == 0 {
        return fmt.Errorf("no resource threshold is configured")
    }
    for name, percent := range thresholds {
        if percent < MinResourcePercentage || percent > MaxResourcePercentage {
            return fmt.Errorf("%v threshold not in [%v, %v] range", name, MinResourcePercentage, MaxResourcePercentage)
        }
    }
    return nil
}

func normalizePercentage(percent api.Percentage) api.Percentage {
    if percent > MaxResourcePercentage {
        return MaxResourcePercentage
@@ -237,10 +212,9 @@ func classifyNodes(
func evictPodsFromSourceNodes(
    ctx context.Context,
    sourceNodes, destinationNodes []NodeInfo,
    podEvictor *evictions.PodEvictor,
    podEvictor framework.Evictor,
    podFilter func(pod *v1.Pod) bool,
    resourceNames []v1.ResourceName,
    strategy string,
    continueEviction continueEvictionCond,
) {
    // upper bound on total number of pods/cpu/memory and optional extended resources to be moved
@@ -290,8 +264,8 @@ func evictPodsFromSourceNodes(
        klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
        // sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
        podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
        evictPods(ctx, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, strategy, continueEviction)
        klog.V(1).InfoS("Evicted pods from node", "node", klog.KObj(node.node), "evictedPods", podEvictor.NodeEvicted(node.node), "usage", node.usage)
        evictPods(ctx, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, continueEviction)

    }
}

@@ -301,8 +275,7 @@ func evictPods(
    nodeInfo NodeInfo,
    totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
    taintsOfLowNodes map[string][]v1.Taint,
    podEvictor *evictions.PodEvictor,
    strategy string,
    podEvictor framework.Evictor,
    continueEviction continueEvictionCond,
) {

@@ -313,7 +286,7 @@ func evictPods(
            continue
        }

        if podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{}) {
        if podEvictor.Evict(ctx, pod, evictions.EvictOptions{}) {
            klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))

            for name := range totalAvailableUsage {
@@ -352,7 +325,7 @@ func evictPods(
    }
}

// sortNodesByUsage sorts nodes based on usage according to the given strategy.
// sortNodesByUsage sorts nodes based on usage according to the given plugin.
func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
    sort.Slice(nodes, func(i, j int) bool {
        ti := nodes[i].usage[v1.ResourceMemory].Value() + nodes[i].usage[v1.ResourceCPU].MilliValue() + nodes[i].usage[v1.ResourcePods].Value()
@@ -366,12 +339,12 @@ func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
        }
    }

        // Return ascending order for HighNodeUtilization strategy
        // Return ascending order for HighNodeUtilization plugin
        if ascending {
            return ti < tj
        }

        // Return descending order for LowNodeUtilization strategy
        // Return descending order for LowNodeUtilization plugin
        return ti > tj
    })
}
@@ -17,14 +17,12 @@ limitations under the License.
|
||||
package nodeutilization
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -105,94 +103,6 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func TestValidateThresholds(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input api.ResourceThresholds
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing nil map for threshold",
|
||||
input: nil,
|
||||
errInfo: fmt.Errorf("no resource threshold is configured"),
|
||||
},
|
||||
{
|
||||
name: "passing no threshold",
|
||||
input: api.ResourceThresholds{},
|
||||
errInfo: fmt.Errorf("no resource threshold is configured"),
|
||||
},
|
||||
{
|
||||
name: "passing extended resource name other than cpu/memory/pods",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
extendedResource: 50,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing invalid resource value",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 110,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("%v threshold not in [%v, %v] range", v1.ResourceCPU, MinResourcePercentage, MaxResourcePercentage),
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with max and min resource value",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 100,
|
||||
v1.ResourceMemory: 0,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with only cpu",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with cpu, memory and pods",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 30,
|
||||
v1.ResourcePods: 40,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with only extended resource",
|
||||
input: api.ResourceThresholds{
|
||||
extendedResource: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with cpu, memory, pods and extended resource",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 30,
|
||||
v1.ResourcePods: 40,
|
||||
extendedResource: 50,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
validateErr := validateThresholds(test.input)
|
||||
|
||||
if validateErr == nil || test.errInfo == nil {
|
||||
if validateErr != test.errInfo {
|
||||
t.Errorf("expected validity of threshold: %#v to be %v but got %v instead", test.input, test.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != test.errInfo.Error() {
|
||||
t.Errorf("expected validity of threshold: %#v to be %v but got %v instead", test.input, test.errInfo, validateErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceUsagePercentages(t *testing.T) {
|
||||
resourceUsagePercentage := resourceUsagePercentages(NodeUsage{
|
||||
node: &v1.Node{
|
||||
@@ -39,20 +39,20 @@ import (
    "k8s.io/client-go/tools/events"
    v1qos "k8s.io/kubectl/pkg/util/qos"
    "k8s.io/utils/pointer"
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
    "sigs.k8s.io/descheduler/pkg/framework"
    frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
    "sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"

    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
    "sigs.k8s.io/descheduler/pkg/api"
    deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
    "sigs.k8s.io/descheduler/pkg/descheduler"
    "sigs.k8s.io/descheduler/pkg/descheduler/client"
    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
    eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
    nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    "sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
    "sigs.k8s.io/descheduler/pkg/framework"
    frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
    "sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
    "sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
    "sigs.k8s.io/descheduler/pkg/utils"
)

@@ -255,7 +255,7 @@ func intersectStrings(lista, listb []string) []string {
func TestLowNodeUtilization(t *testing.T) {
    ctx := context.Background()

    clientSet, _, _, getPodsAssignedToNode, stopCh := initializeClient(t)
    clientSet, sharedInformerFactory, _, getPodsAssignedToNode, stopCh := initializeClient(t)
    defer close(stopCh)

    nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -343,7 +343,7 @@ func TestLowNodeUtilization(t *testing.T) {
    defer deleteRC(ctx, t, clientSet, rc)
    waitForRCPodsRunning(ctx, t, clientSet, rc)

    // Run LowNodeUtilization strategy
    // Run LowNodeUtilization plugin
    podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)

    evictorFilter := evictions.NewEvictorFilter(
@@ -366,28 +366,27 @@ func TestLowNodeUtilization(t *testing.T) {
    }
    podsBefore := len(podsOnMosttUtilizedNode)

    t.Log("Running LowNodeUtilization strategy")
    nodeutilization.LowNodeUtilization(
        ctx,
        clientSet,
        deschedulerapi.DeschedulerStrategy{
            Enabled: true,
            Params: &deschedulerapi.StrategyParameters{
                NodeResourceUtilizationThresholds: &deschedulerapi.NodeResourceUtilizationThresholds{
                    Thresholds: deschedulerapi.ResourceThresholds{
                        v1.ResourceCPU: 70,
                    },
                    TargetThresholds: deschedulerapi.ResourceThresholds{
                        v1.ResourceCPU: 80,
                    },
                },
            },
    t.Log("Running LowNodeUtilization plugin")
    handle := &frameworkfake.HandleImpl{
        ClientsetImpl:                 clientSet,
        GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
        PodEvictorImpl:                podEvictor,
        EvictorFilterImpl:             evictorFilter,
        SharedInformerFactoryImpl:     sharedInformerFactory,
    }

    plugin, err := nodeutilization.NewLowNodeUtilization(&componentconfig.LowNodeUtilizationArgs{
        Thresholds: api.ResourceThresholds{
            v1.ResourceCPU: 70,
        },
        workerNodes,
        podEvictor,
        evictorFilter,
        getPodsAssignedToNode,
    )
        TargetThresholds: api.ResourceThresholds{
            v1.ResourceCPU: 80,
        },
    }, handle)
    if err != nil {
        t.Fatalf("Unable to initialize the plugin: %v", err)
    }
    plugin.(framework.BalancePlugin).Balance(ctx, workerNodes)

    waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)