descheduler/pkg/framework/plugins/nodeutilization/lownodeutilization.go
Commit bb5930eb21 (Gabriel Tiossi, 2024-03-02): Improve PodEvictor observability through EvictOptions (#1349)
* feat: profile name for pods_evicted metric

Support a new "profile" label on the "pods_evicted" metric, making it
possible to see which profiles are evicting the most pods and improving
observability.

* refactor: evictoptions improved observability

Send the profile and strategy names through EvictOptions, allowing
Evictors to access observability information.

* cleanup: remove unnecessary EvictOptions reference

* feat: evictoptions for nodeutilization

Explicitly pass options when invoking evictPods from the nodeutilization
helper function, for both highnodeutilization and lownodeutilization.
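
A minimal sketch of the flow this commit describes, assuming EvictOptions exposes the profile and strategy names as plain string fields (StrategyName is used verbatim in the file below; ProfileName is inferred from the commit message and hypothetical here):

    // A strategy populates EvictOptions so the evictor can label the
    // pods_evicted metric with the originating profile and strategy.
    opts := evictions.EvictOptions{
        ProfileName:  "my-profile",         // assumed field name, per the commit message
        StrategyName: "LowNodeUtilization", // matches the usage in Balance below
    }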
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeutilization

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

const LowNodeUtilizationPluginName = "LowNodeUtilization"
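
// An illustrative policy snippet enabling this plugin, sketched against the
// descheduler's v1alpha2 policy format; the profile name and threshold values
// are arbitrary examples, not recommendations:
//
//	apiVersion: "descheduler/v1alpha2"
//	kind: "DeschedulerPolicy"
//	profiles:
//	  - name: default
//	    plugins:
//	      balance:
//	        enabled:
//	          - "LowNodeUtilization"
//	    pluginConfig:
//	      - name: "LowNodeUtilization"
//	        args:
//	          thresholds:
//	            "cpu": 20
//	            "memory": 20
//	            "pods": 20
//	          targetThresholds:
//	            "cpu": 50
//	            "memory": 50
//	            "pods": 50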
// LowNodeUtilization evicts pods from overutilized nodes so that they can be
// rescheduled onto underutilized nodes. Note that CPU/memory requests are used
// to calculate node utilization, not the actual resource usage.
type LowNodeUtilization struct {
	handle    frameworktypes.Handle
	args      *LowNodeUtilizationArgs
	podFilter func(pod *v1.Pod) bool
}

var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}

// NewLowNodeUtilization builds the plugin from its arguments while passing a handle.
func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
	lowNodeUtilizationArgs, ok := args.(*LowNodeUtilizationArgs)
	if !ok {
		return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
	}

	podFilter, err := podutil.NewOptions().
		WithFilter(handle.Evictor().Filter).
		BuildFilterFunc()
	if err != nil {
		return nil, fmt.Errorf("error initializing pod filter function: %v", err)
	}

	return &LowNodeUtilization{
		handle:    handle,
		args:      lowNodeUtilizationArgs,
		podFilter: podFilter,
	}, nil
}
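
// A hypothetical wiring sketch (not part of the upstream API surface): how a
// caller holding a frameworktypes.Handle might construct and run the plugin.
// "handle", "ctx", and "nodes" are assumed to come from the descheduler
// framework or a test fixture, and "api" is assumed to be the
// sigs.k8s.io/descheduler/pkg/api package; the args fields mirror those read
// in Balance below.
//
//	args := &LowNodeUtilizationArgs{
//		Thresholds:       api.ResourceThresholds{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
//		TargetThresholds: api.ResourceThresholds{v1.ResourceCPU: 50, v1.ResourceMemory: 50},
//	}
//	plugin, err := NewLowNodeUtilization(args, handle)
//	if err != nil {
//		// handle the error
//	}
//	status := plugin.(frameworktypes.BalancePlugin).Balance(ctx, nodes)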

// Name retrieves the plugin name.
func (l *LowNodeUtilization) Name() string {
	return LowNodeUtilizationPluginName
}

// Balance implements the Balance extension point for the plugin. It classifies
// nodes as under- or overutilized and evicts pods from the overutilized ones.
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
	useDeviationThresholds := l.args.UseDeviationThresholds
	thresholds := l.args.Thresholds
	targetThresholds := l.args.TargetThresholds
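
	// Example: with thresholds {cpu: 20} and targetThresholds {cpu: 50}, nodes
	// whose summed pod requests fall below 20% of allocatable CPU are treated
	// as underutilized, nodes above 50% as overutilized, and nodes in between
	// are left untouched.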

	// If any of Pods/CPU/Mem is unset, default both thresholds for it so the
	// resource cannot constrain classification on its own: 100% for classic
	// thresholds, the minimum percentage for deviation thresholds.
	for _, resourceName := range []v1.ResourceName{v1.ResourcePods, v1.ResourceCPU, v1.ResourceMemory} {
		if _, ok := thresholds[resourceName]; !ok {
			if useDeviationThresholds {
				thresholds[resourceName] = MinResourcePercentage
				targetThresholds[resourceName] = MinResourcePercentage
			} else {
				thresholds[resourceName] = MaxResourcePercentage
				targetThresholds[resourceName] = MaxResourcePercentage
			}
		}
	}
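
	// Resolve the set of resources to evaluate, then partition the nodes into
	// underutilized nodes (candidate destinations) and overutilized source
	// nodes (eviction candidates).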
	resourceNames := getResourceNames(thresholds)

	lowNodes, sourceNodes := classifyNodes(
		getNodeUsage(nodes, resourceNames, l.handle.GetPodsAssignedToNodeFunc()),
		getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, l.handle.GetPodsAssignedToNodeFunc(), useDeviationThresholds),
		// The node has to be schedulable (to be able to move workload there).
		func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
			if nodeutil.IsNodeUnschedulable(node) {
				klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
				return false
			}
			return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
		},
		func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
			return isNodeAboveTargetUtilization(usage, threshold.highResourceThreshold)
		},
	)

	// log message for nodes with low utilization
	underutilizationCriteria := []interface{}{
		"CPU", thresholds[v1.ResourceCPU],
		"Mem", thresholds[v1.ResourceMemory],
		"Pods", thresholds[v1.ResourcePods],
	}
	for name := range thresholds {
		if !nodeutil.IsBasicResource(name) {
			underutilizationCriteria = append(underutilizationCriteria, string(name), int64(thresholds[name]))
		}
	}
	klog.V(1).InfoS("Criteria for a node under utilization", underutilizationCriteria...)
	klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))

	// log message for overutilized nodes
	overutilizationCriteria := []interface{}{
		"CPU", targetThresholds[v1.ResourceCPU],
		"Mem", targetThresholds[v1.ResourceMemory],
		"Pods", targetThresholds[v1.ResourcePods],
	}
	for name := range targetThresholds {
		if !nodeutil.IsBasicResource(name) {
			overutilizationCriteria = append(overutilizationCriteria, string(name), int64(targetThresholds[name]))
		}
	}
	klog.V(1).InfoS("Criteria for a node above target utilization", overutilizationCriteria...)
	klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(sourceNodes))

	// Bail out early in the degenerate cases where balancing cannot help.
	if len(lowNodes) == 0 {
		klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
		return nil
	}
	if len(lowNodes) <= l.args.NumberOfNodes {
		klog.V(1).InfoS("Number of underutilized nodes is less than or equal to NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", l.args.NumberOfNodes)
		return nil
	}
	if len(lowNodes) == len(nodes) {
		klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
		return nil
	}
	if len(sourceNodes) == 0 {
		klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
		return nil
	}

	// Stop evicting from a source node once its utilization drops below the
	// target threshold, or once the destination nodes have no spare capacity
	// left for any tracked resource (CmpInt64(0) < 1 means the remaining
	// quantity is zero or negative).
	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
		if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
			return false
		}
		for name := range totalAvailableUsage {
			if totalAvailableUsage[name].CmpInt64(0) < 1 {
				return false
			}
		}
		return true
	}

	// Sort the source nodes by usage in descending order so the most
	// overutilized nodes are relieved first.
	sortNodesByUsage(sourceNodes, false)

	evictPodsFromSourceNodes(
		ctx,
		l.args.EvictableNamespaces,
		sourceNodes,
		lowNodes,
		l.handle.Evictor(),
		evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
		l.podFilter,
		resourceNames,
		continueEvictionCond)

	return nil
}