
Merge pull request #1532 from ingvagabund/node-utilization-refactoring

Node utilization refactoring
Kubernetes Prow Robot, 2024-10-14 20:10:22 +01:00 (committed by GitHub)
2 changed files with 104 additions and 92 deletions

Changed file 1 of 2: HighNodeUtilization plugin

@@ -38,9 +38,12 @@ const HighNodeUtilizationPluginName = "HighNodeUtilization"
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
type HighNodeUtilization struct {
-    handle    frameworktypes.Handle
-    args      *HighNodeUtilizationArgs
-    podFilter func(pod *v1.Pod) bool
+    handle                   frameworktypes.Handle
+    args                     *HighNodeUtilizationArgs
+    podFilter                func(pod *v1.Pod) bool
+    underutilizationCriteria []interface{}
+    resourceNames            []v1.ResourceName
+    targetThresholds         api.ResourceThresholds
}
var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
@@ -52,6 +55,21 @@ func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (
return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
}
+    targetThresholds := make(api.ResourceThresholds)
+    setDefaultForThresholds(highNodeUtilizatioArgs.Thresholds, targetThresholds)
+    resourceNames := getResourceNames(targetThresholds)
+    underutilizationCriteria := []interface{}{
+        "CPU", highNodeUtilizatioArgs.Thresholds[v1.ResourceCPU],
+        "Mem", highNodeUtilizatioArgs.Thresholds[v1.ResourceMemory],
+        "Pods", highNodeUtilizatioArgs.Thresholds[v1.ResourcePods],
+    }
+    for name := range highNodeUtilizatioArgs.Thresholds {
+        if !nodeutil.IsBasicResource(name) {
+            underutilizationCriteria = append(underutilizationCriteria, string(name), int64(highNodeUtilizatioArgs.Thresholds[name]))
+        }
+    }
podFilter, err := podutil.NewOptions().
WithFilter(handle.Evictor().Filter).
BuildFilterFunc()
@@ -60,9 +78,12 @@ func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (
}
return &HighNodeUtilization{
-        handle:    handle,
-        args:      highNodeUtilizatioArgs,
-        podFilter: podFilter,
+        handle:                   handle,
+        args:                     highNodeUtilizatioArgs,
+        resourceNames:            resourceNames,
+        targetThresholds:         targetThresholds,
+        underutilizationCriteria: underutilizationCriteria,
+        podFilter:                podFilter,
}, nil
}
@@ -73,15 +94,9 @@ func (h *HighNodeUtilization) Name() string {
// Balance extension point implementation for the plugin
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
-    thresholds := h.args.Thresholds
-    targetThresholds := make(api.ResourceThresholds)
-    setDefaultForThresholds(thresholds, targetThresholds)
-    resourceNames := getResourceNames(targetThresholds)
sourceNodes, highNodes := classifyNodes(
-        getNodeUsage(nodes, resourceNames, h.handle.GetPodsAssignedToNodeFunc()),
-        getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, h.handle.GetPodsAssignedToNodeFunc(), false),
+        getNodeUsage(nodes, h.resourceNames, h.handle.GetPodsAssignedToNodeFunc()),
+        getNodeThresholds(nodes, h.args.Thresholds, h.targetThresholds, h.resourceNames, h.handle.GetPodsAssignedToNodeFunc(), false),
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
},
@@ -94,18 +109,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
})
// log message in one line
-    keysAndValues := []interface{}{
-        "CPU", thresholds[v1.ResourceCPU],
-        "Mem", thresholds[v1.ResourceMemory],
-        "Pods", thresholds[v1.ResourcePods],
-    }
-    for name := range thresholds {
-        if !nodeutil.IsBasicResource(name) {
-            keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
-        }
-    }
-    klog.V(1).InfoS("Criteria for a node below target utilization", keysAndValues...)
+    klog.V(1).InfoS("Criteria for a node below target utilization", h.underutilizationCriteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(sourceNodes))
if len(sourceNodes) == 0 {
@@ -147,7 +151,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
h.handle.Evictor(),
evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
h.podFilter,
-        resourceNames,
+        h.resourceNames,
continueEvictionCond)
return nil

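For context on the pattern introduced above: the criteria key/value pairs are now built once in the plugin constructor and reused by every Balance call, instead of being rebuilt each cycle. Below is a minimal, self-contained sketch of that precompute-then-log pattern; the buildCriteria helper, the plain map type, and the example resource names are illustrative stand-ins, not code from the PR.

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

// buildCriteria is a hypothetical helper; the PR inlines equivalent logic
// directly in NewHighNodeUtilization and NewLowNodeUtilization. Thresholds
// are simplified to a plain map so the snippet stays self-contained.
func buildCriteria(thresholds map[string]float64) []interface{} {
	criteria := []interface{}{
		"CPU", thresholds["cpu"],
		"Mem", thresholds["memory"],
		"Pods", thresholds["pods"],
	}
	for name, value := range thresholds {
		switch name {
		case "cpu", "memory", "pods":
			// basic resources are already covered above
		default:
			// extended resources (e.g. GPUs) are appended by name
			criteria = append(criteria, name, int64(value))
		}
	}
	return criteria
}

func main() {
	// Enable verbosity 1 so the V(1) message below is actually emitted.
	klog.InitFlags(nil)
	flag.Set("v", "1")
	flag.Parse()

	// Built once at construction time (stored on the plugin struct in the PR)...
	criteria := buildCriteria(map[string]float64{
		"cpu": 20, "memory": 20, "pods": 20, "example.com/gpu": 10,
	})
	// ...and reused verbatim on every Balance invocation.
	klog.V(1).InfoS("Criteria for a node below target utilization", criteria...)
}
```

The only behavioural effect is that the logged criteria reflect the thresholds as seen at construction time; the log output itself is unchanged.
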
Changed file 2 of 2: LowNodeUtilization plugin

@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -36,9 +37,12 @@ const LowNodeUtilizationPluginName = "LowNodeUtilization"
// to calculate nodes' utilization and not the actual resource usage.
type LowNodeUtilization struct {
-    handle    frameworktypes.Handle
-    args      *LowNodeUtilizationArgs
-    podFilter func(pod *v1.Pod) bool
+    handle                   frameworktypes.Handle
+    args                     *LowNodeUtilizationArgs
+    podFilter                func(pod *v1.Pod) bool
+    underutilizationCriteria []interface{}
+    overutilizationCriteria  []interface{}
+    resourceNames            []v1.ResourceName
}
var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
@@ -50,6 +54,30 @@ func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (f
return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
}
+    setDefaultForLNUThresholds(lowNodeUtilizationArgsArgs.Thresholds, lowNodeUtilizationArgsArgs.TargetThresholds, lowNodeUtilizationArgsArgs.UseDeviationThresholds)
+
+    underutilizationCriteria := []interface{}{
+        "CPU", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceCPU],
+        "Mem", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceMemory],
+        "Pods", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourcePods],
+    }
+    for name := range lowNodeUtilizationArgsArgs.Thresholds {
+        if !nodeutil.IsBasicResource(name) {
+            underutilizationCriteria = append(underutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.Thresholds[name]))
+        }
+    }
+
+    overutilizationCriteria := []interface{}{
+        "CPU", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceCPU],
+        "Mem", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceMemory],
+        "Pods", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourcePods],
+    }
+    for name := range lowNodeUtilizationArgsArgs.TargetThresholds {
+        if !nodeutil.IsBasicResource(name) {
+            overutilizationCriteria = append(overutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.TargetThresholds[name]))
+        }
+    }
podFilter, err := podutil.NewOptions().
WithFilter(handle.Evictor().Filter).
BuildFilterFunc()
@@ -58,9 +86,12 @@ func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (f
}
return &LowNodeUtilization{
-        handle:    handle,
-        args:      lowNodeUtilizationArgsArgs,
-        podFilter: podFilter,
+        handle:                   handle,
+        args:                     lowNodeUtilizationArgsArgs,
+        underutilizationCriteria: underutilizationCriteria,
+        overutilizationCriteria:  overutilizationCriteria,
+        resourceNames:            getResourceNames(lowNodeUtilizationArgsArgs.Thresholds),
+        podFilter:                podFilter,
}, nil
}
@@ -71,43 +102,9 @@ func (l *LowNodeUtilization) Name() string {
// Balance extension point implementation for the plugin
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
-    useDeviationThresholds := l.args.UseDeviationThresholds
-    thresholds := l.args.Thresholds
-    targetThresholds := l.args.TargetThresholds
-
-    // check if Pods/CPU/Mem are set, if not, set them to 100
-    if _, ok := thresholds[v1.ResourcePods]; !ok {
-        if useDeviationThresholds {
-            thresholds[v1.ResourcePods] = MinResourcePercentage
-            targetThresholds[v1.ResourcePods] = MinResourcePercentage
-        } else {
-            thresholds[v1.ResourcePods] = MaxResourcePercentage
-            targetThresholds[v1.ResourcePods] = MaxResourcePercentage
-        }
-    }
-    if _, ok := thresholds[v1.ResourceCPU]; !ok {
-        if useDeviationThresholds {
-            thresholds[v1.ResourceCPU] = MinResourcePercentage
-            targetThresholds[v1.ResourceCPU] = MinResourcePercentage
-        } else {
-            thresholds[v1.ResourceCPU] = MaxResourcePercentage
-            targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
-        }
-    }
-    if _, ok := thresholds[v1.ResourceMemory]; !ok {
-        if useDeviationThresholds {
-            thresholds[v1.ResourceMemory] = MinResourcePercentage
-            targetThresholds[v1.ResourceMemory] = MinResourcePercentage
-        } else {
-            thresholds[v1.ResourceMemory] = MaxResourcePercentage
-            targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
-        }
-    }
-    resourceNames := getResourceNames(thresholds)
lowNodes, sourceNodes := classifyNodes(
-        getNodeUsage(nodes, resourceNames, l.handle.GetPodsAssignedToNodeFunc()),
-        getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, l.handle.GetPodsAssignedToNodeFunc(), useDeviationThresholds),
+        getNodeUsage(nodes, l.resourceNames, l.handle.GetPodsAssignedToNodeFunc()),
+        getNodeThresholds(nodes, l.args.Thresholds, l.args.TargetThresholds, l.resourceNames, l.handle.GetPodsAssignedToNodeFunc(), l.args.UseDeviationThresholds),
// The node has to be schedulable (to be able to move workload there)
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
if nodeutil.IsNodeUnschedulable(node) {
@@ -122,31 +119,11 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
)
// log message for nodes with low utilization
-    underutilizationCriteria := []interface{}{
-        "CPU", thresholds[v1.ResourceCPU],
-        "Mem", thresholds[v1.ResourceMemory],
-        "Pods", thresholds[v1.ResourcePods],
-    }
-    for name := range thresholds {
-        if !nodeutil.IsBasicResource(name) {
-            underutilizationCriteria = append(underutilizationCriteria, string(name), int64(thresholds[name]))
-        }
-    }
-    klog.V(1).InfoS("Criteria for a node under utilization", underutilizationCriteria...)
+    klog.V(1).InfoS("Criteria for a node under utilization", l.underutilizationCriteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
// log message for over utilized nodes
-    overutilizationCriteria := []interface{}{
-        "CPU", targetThresholds[v1.ResourceCPU],
-        "Mem", targetThresholds[v1.ResourceMemory],
-        "Pods", targetThresholds[v1.ResourcePods],
-    }
-    for name := range targetThresholds {
-        if !nodeutil.IsBasicResource(name) {
-            overutilizationCriteria = append(overutilizationCriteria, string(name), int64(targetThresholds[name]))
-        }
-    }
-    klog.V(1).InfoS("Criteria for a node above target utilization", overutilizationCriteria...)
+    klog.V(1).InfoS("Criteria for a node above target utilization", l.overutilizationCriteria...)
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(sourceNodes))
if len(lowNodes) == 0 {
@@ -194,8 +171,39 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
l.handle.Evictor(),
evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
l.podFilter,
-        resourceNames,
+        l.resourceNames,
continueEvictionCond)
return nil
}
+func setDefaultForLNUThresholds(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) {
+    // check if Pods/CPU/Mem are set, if not, set them to 100
+    if _, ok := thresholds[v1.ResourcePods]; !ok {
+        if useDeviationThresholds {
+            thresholds[v1.ResourcePods] = MinResourcePercentage
+            targetThresholds[v1.ResourcePods] = MinResourcePercentage
+        } else {
+            thresholds[v1.ResourcePods] = MaxResourcePercentage
+            targetThresholds[v1.ResourcePods] = MaxResourcePercentage
+        }
+    }
+    if _, ok := thresholds[v1.ResourceCPU]; !ok {
+        if useDeviationThresholds {
+            thresholds[v1.ResourceCPU] = MinResourcePercentage
+            targetThresholds[v1.ResourceCPU] = MinResourcePercentage
+        } else {
+            thresholds[v1.ResourceCPU] = MaxResourcePercentage
+            targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
+        }
+    }
+    if _, ok := thresholds[v1.ResourceMemory]; !ok {
+        if useDeviationThresholds {
+            thresholds[v1.ResourceMemory] = MinResourcePercentage
+            targetThresholds[v1.ResourceMemory] = MinResourcePercentage
+        } else {
+            thresholds[v1.ResourceMemory] = MaxResourcePercentage
+            targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
+        }
+    }
+}
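
The new setDefaultForLNUThresholds helper is simply the defaulting block that used to sit at the top of Balance, extracted so the defaults are applied once in the constructor. Below is a standalone sketch of the same behaviour, assuming MaxResourcePercentage = 100 (per the comment in the diff) and MinResourcePercentage = 0, with a plain map standing in for api.ResourceThresholds; names and types here are illustrative only.

```go
package main

import "fmt"

// Assumed values for the package constants used in the diff.
const (
	minResourcePercentage = 0
	maxResourcePercentage = 100
)

type resourceThresholds map[string]float64

// setDefaults mirrors setDefaultForLNUThresholds: any of pods/cpu/memory
// not configured by the user gets a default on both threshold maps.
func setDefaults(thresholds, targetThresholds resourceThresholds, useDeviationThresholds bool) {
	for _, name := range []string{"pods", "cpu", "memory"} {
		if _, ok := thresholds[name]; !ok {
			if useDeviationThresholds {
				thresholds[name] = minResourcePercentage
				targetThresholds[name] = minResourcePercentage
			} else {
				thresholds[name] = maxResourcePercentage
				targetThresholds[name] = maxResourcePercentage
			}
		}
	}
}

func main() {
	// Only CPU is configured; Pods and Memory fall back to the defaults.
	thresholds := resourceThresholds{"cpu": 20}
	targetThresholds := resourceThresholds{"cpu": 50}
	setDefaults(thresholds, targetThresholds, false)
	fmt.Println(thresholds)       // map[cpu:20 memory:100 pods:100]
	fmt.Println(targetThresholds) // map[cpu:50 memory:100 pods:100]
}
```

Folding the three per-resource blocks into a loop keeps the sketch short; the PR itself keeps the explicit per-resource if blocks shown in the diff.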