feat: refactoring thresholds and usage assessment
This commit refactors the thresholds and usage assessment logic for the node utilization plugins. Both the HighNodeUtilization and LowNodeUtilization plugins are affected by this change.
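
At a high level, the change moves the plugins onto one normalized representation: node usage and the user-supplied thresholds are both expressed as percentages of node capacity before nodes are classified (see assessNodesUsagesAndStaticThresholds and the normalizer package in the diff below). The Go sketch that follows illustrates only that idea; the types and the helper are simplified, hypothetical stand-ins, not the descheduler's actual implementation.

// Illustrative sketch only. Percentage and ResourceThresholds mirror the
// shapes of api.Percentage and api.ResourceThresholds; usageToPercentages is
// a hypothetical stand-in for the normalization performed by
// assessNodesUsagesAndStaticThresholds.
package main

import "fmt"

type Percentage float64
type ResourceThresholds map[string]Percentage

// usageToPercentages converts absolute per-resource usage into percentages
// of the node's capacity, the common form that classifier functions can
// compare field by field against static thresholds.
func usageToPercentages(usage, capacity map[string]int64) ResourceThresholds {
	out := ResourceThresholds{}
	for name, used := range usage {
		total, ok := capacity[name]
		if !ok || total == 0 {
			continue // no recorded capacity for this resource; skip it
		}
		out[name] = Percentage(float64(used) / float64(total) * 100)
	}
	return out
}

func main() {
	usage := map[string]int64{"cpu": 1500, "memory": 2 << 30}    // 1500m, 2Gi
	capacity := map[string]int64{"cpu": 4000, "memory": 8 << 30} // 4000m, 8Gi
	// Once usage is percentage-based, a threshold such as {cpu: 20} can be
	// compared directly against it.
	fmt.Println(usageToPercentages(usage, capacity)) // map[cpu:37.5 memory:25]
}
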
@@ -28,6 +28,7 @@ import (
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 )
@@ -42,6 +43,7 @@ type HighNodeUtilization struct {
 	podFilter                func(pod *v1.Pod) bool
 	underutilizationCriteria []interface{}
 	resourceNames            []v1.ResourceName
+	extendedResourceNames    []v1.ResourceName
 	targetThresholds         api.ResourceThresholds
 	usageClient              usageClient
 }
@@ -57,7 +59,7 @@ func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (
 
 	targetThresholds := make(api.ResourceThresholds)
 	setDefaultForThresholds(highNodeUtilizatioArgs.Thresholds, targetThresholds)
-	resourceNames := getResourceNames(targetThresholds)
+	resourceNames := getResourceNames(highNodeUtilizatioArgs.Thresholds)
 
 	underutilizationCriteria := []interface{}{
 		"CPU", highNodeUtilizatioArgs.Thresholds[v1.ResourceCPU],
@@ -77,14 +79,19 @@ func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (
 		return nil, fmt.Errorf("error initializing pod filter function: %v", err)
 	}
 
+	extendedResourceNames := uniquifyResourceNames(
+		append(resourceNames, v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods),
+	)
+
 	return &HighNodeUtilization{
 		handle:                   handle,
 		args:                     highNodeUtilizatioArgs,
 		resourceNames:            resourceNames,
+		extendedResourceNames:    extendedResourceNames,
 		targetThresholds:         targetThresholds,
 		underutilizationCriteria: underutilizationCriteria,
 		podFilter:                podFilter,
-		usageClient:              newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc()),
+		usageClient:              newRequestedUsageClient(extendedResourceNames, handle.GetPodsAssignedToNodeFunc()),
 	}, nil
 }
@@ -102,11 +109,18 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 	}
 
 	nodesMap, nodesUsageMap, podListMap := getNodeUsageSnapshot(nodes, h.usageClient)
-	nodeThresholdsMap := getStaticNodeThresholds(nodes, h.args.Thresholds, h.targetThresholds)
-	nodesUsageAsNodeThresholdsMap := nodeUsageToResourceThresholds(nodesUsageMap, nodesMap)
+	capacities := referencedResourceListForNodesCapacity(nodes)
+
+	usage, thresholds := assessNodesUsagesAndStaticThresholds(
+		nodesUsageMap,
+		capacities,
+		h.args.Thresholds,
+		h.targetThresholds,
+	)
+
 	nodeGroups := classifyNodeUsage(
-		nodesUsageAsNodeThresholdsMap,
-		nodeThresholdsMap,
+		usage,
+		thresholds,
 		[]classifierFnc{
 			// underutilized nodes
 			func(nodeName string, usage, threshold api.ResourceThresholds) bool {
@@ -128,7 +142,12 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 	category := []string{"underutilized", "overutilized"}
 	for i := range nodeGroups {
 		for nodeName := range nodeGroups[i] {
-			klog.InfoS("Node is "+category[i], "node", klog.KObj(nodesMap[nodeName]), "usage", nodesUsageMap[nodeName], "usagePercentage", resourceUsagePercentages(nodesUsageMap[nodeName], nodesMap[nodeName], true))
+			klog.InfoS(
+				fmt.Sprintf("Node is %s", category[i]),
+				"node", klog.KObj(nodesMap[nodeName]),
+				"usage", nodesUsageMap[nodeName],
+				"usagePercentage", normalizer.Round(usage[nodeName]),
+			)
 			nodeInfos[i] = append(nodeInfos[i], NodeInfo{
 				NodeUsage: NodeUsage{
 					node: nodesMap[nodeName],
@@ -136,8 +155,8 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 					allPods: podListMap[nodeName],
 				},
 				thresholds: NodeThresholds{
-					lowResourceThreshold:  resourceThresholdsToNodeUsage(nodeThresholdsMap[nodeName][0], nodesMap[nodeName]),
-					highResourceThreshold: resourceThresholdsToNodeUsage(nodeThresholdsMap[nodeName][1], nodesMap[nodeName]),
+					lowResourceThreshold:  resourceThresholdsToNodeUsage(thresholds[nodeName][0], capacities[nodeName], h.extendedResourceNames),
+					highResourceThreshold: resourceThresholdsToNodeUsage(thresholds[nodeName][1], capacities[nodeName], h.extendedResourceNames),
 				},
 			})
 		}
@@ -189,7 +208,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 		h.handle.Evictor(),
 		evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
 		h.podFilter,
-		h.resourceNames,
+		h.extendedResourceNames,
 		continueEvictionCond,
 		h.usageClient,
 		nil,
@@ -199,22 +218,16 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 }
 
 func setDefaultForThresholds(thresholds, targetThresholds api.ResourceThresholds) {
-	// check if Pods/CPU/Mem are set, if not, set them to 100
-	if _, ok := thresholds[v1.ResourcePods]; !ok {
-		thresholds[v1.ResourcePods] = MaxResourcePercentage
+	if _, ok := thresholds[v1.ResourcePods]; ok {
+		targetThresholds[v1.ResourcePods] = MaxResourcePercentage
 	}
-	if _, ok := thresholds[v1.ResourceCPU]; !ok {
-		thresholds[v1.ResourceCPU] = MaxResourcePercentage
+	if _, ok := thresholds[v1.ResourceCPU]; ok {
+		targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
 	}
-	if _, ok := thresholds[v1.ResourceMemory]; !ok {
-		thresholds[v1.ResourceMemory] = MaxResourcePercentage
+	if _, ok := thresholds[v1.ResourceMemory]; ok {
+		targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
 	}
-
-	// Default targetThreshold resource values to 100
-	targetThresholds[v1.ResourcePods] = MaxResourcePercentage
-	targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
-	targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
 
 	for name := range thresholds {
 		if !nodeutil.IsBasicResource(name) {
 			targetThresholds[name] = MaxResourcePercentage
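
As a reading aid for the last hunk: assuming MaxResourcePercentage is 100, the defaulting logic changes from filling in missing basic resources in the source thresholds (and always forcing all three target thresholds to 100) to populating targetThresholds only for the resources the user actually set. A minimal sketch with simplified map types, omitting the extended-resources loop that both versions share:

// Hypothetical before/after sketch of the defaulting change above, assuming
// MaxResourcePercentage == 100; simplified types, not the descheduler's own.
package main

import "fmt"

type ResourceThresholds map[string]float64

const MaxResourcePercentage = 100

// Old behavior: absent basic resources were injected into the *source*
// thresholds at 100%, and targetThresholds always got all three at 100%.
func setDefaultsOld(thresholds, targetThresholds ResourceThresholds) {
	for _, name := range []string{"cpu", "memory", "pods"} {
		if _, ok := thresholds[name]; !ok {
			thresholds[name] = MaxResourcePercentage
		}
		targetThresholds[name] = MaxResourcePercentage
	}
}

// New behavior: the source thresholds are left untouched; targetThresholds
// is only populated for resources the user actually configured.
func setDefaultsNew(thresholds, targetThresholds ResourceThresholds) {
	for _, name := range []string{"cpu", "memory", "pods"} {
		if _, ok := thresholds[name]; ok {
			targetThresholds[name] = MaxResourcePercentage
		}
	}
}

func main() {
	oldSrc, oldTgt := ResourceThresholds{"cpu": 20}, ResourceThresholds{}
	setDefaultsOld(oldSrc, oldTgt)
	newSrc, newTgt := ResourceThresholds{"cpu": 20}, ResourceThresholds{}
	setDefaultsNew(newSrc, newTgt)
	fmt.Println(oldSrc, oldTgt) // map[cpu:20 memory:100 pods:100] map[cpu:100 memory:100 pods:100]
	fmt.Println(newSrc, newTgt) // map[cpu:20] map[cpu:100]
}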