
Merge pull request #771 from dineshbhor/fix-highnodeutilization-node-sorting

Sort nodes in ascending order for HighNodeUtilization
Authored by Kubernetes Prow Robot on 2022-03-29 02:58:47 -07:00, committed by GitHub.
4 changed files with 115 additions and 8 deletions


@@ -120,6 +120,10 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
         return true
     }
+    // Sort the nodes by the usage in ascending order
+    sortNodesByUsage(sourceNodes, true)
     evictPodsFromSourceNodes(
         ctx,
         sourceNodes,


@@ -168,6 +168,9 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
         return true
     }
+    // Sort the nodes by the usage in descending order
+    sortNodesByUsage(sourceNodes, false)
     evictPodsFromSourceNodes(
         ctx,
         sourceNodes,


@@ -241,9 +241,6 @@ func evictPodsFromSourceNodes(
     strategy string,
     continueEviction continueEvictionCond,
 ) {
-    sortNodesByUsage(sourceNodes)
     // upper bound on total number of pods/cpu/memory and optional extended resources to be moved
     totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{
         v1.ResourcePods: {},
@@ -356,8 +353,8 @@ func evictPods(
     }
 }
-// sortNodesByUsage sorts nodes based on usage in descending order
-func sortNodesByUsage(nodes []NodeInfo) {
+// sortNodesByUsage sorts nodes based on usage according to the given strategy.
+func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
     sort.Slice(nodes, func(i, j int) bool {
         ti := nodes[i].usage[v1.ResourceMemory].Value() + nodes[i].usage[v1.ResourceCPU].MilliValue() + nodes[i].usage[v1.ResourcePods].Value()
         tj := nodes[j].usage[v1.ResourceMemory].Value() + nodes[j].usage[v1.ResourceCPU].MilliValue() + nodes[j].usage[v1.ResourcePods].Value()
@@ -370,7 +367,12 @@ func sortNodesByUsage(nodes []NodeInfo) {
             }
         }
-        // To return sorted in descending order
+        // Return ascending order for HighNodeUtilization strategy
+        if ascending {
+            return ti < tj
+        }
+        // Return descending order for LowNodeUtilization strategy
         return ti > tj
     })
 }
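For readers skimming the diff, a minimal, self-contained sketch of the comparator logic above may help. The names usage and sortByUsage below are illustrative stand-ins, not the descheduler's API, and plain int64 sums replace the resource.Quantity values taken from the NodeInfo usage map in the real code:

package main

import (
	"fmt"
	"sort"
)

// usage is a simplified stand-in for a node's measured usage:
// memory in bytes, CPU in millicores, and the number of running pods.
type usage struct {
	name   string
	memory int64
	cpu    int64
	pods   int64
}

// sortByUsage mirrors the shape of the updated sortNodesByUsage: collapse the
// three resources into a single score and order ascending or descending on it.
func sortByUsage(nodes []usage, ascending bool) {
	sort.Slice(nodes, func(i, j int) bool {
		ti := nodes[i].memory + nodes[i].cpu + nodes[i].pods
		tj := nodes[j].memory + nodes[j].cpu + nodes[j].pods
		if ascending {
			return ti < tj // HighNodeUtilization: least-utilized nodes first
		}
		return ti > tj // LowNodeUtilization: most-utilized nodes first
	})
}

func main() {
	nodes := []usage{
		{"a", 4000000000, 1500, 30},
		{"b", 1000000000, 500, 10},
		{"c", 2000000000, 900, 20},
	}
	sortByUsage(nodes, true)
	fmt.Println(nodes[0].name, nodes[1].name, nodes[2].name) // ascending: b c a
	sortByUsage(nodes, false)
	fmt.Println(nodes[0].name, nodes[1].name, nodes[2].name) // descending: a c b
}

As in the real function, memory bytes dominate the summed score for typical values, so the ordering is effectively memory-driven; this PR only changes the direction of the comparison, not the score itself.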


@@ -18,17 +18,91 @@ package nodeutilization
 import (
     "fmt"
+    "math"
+    "testing"
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
-    "math"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "sigs.k8s.io/descheduler/pkg/api"
-    "testing"
 )
 var (
     lowPriority = int32(0)
     highPriority = int32(10000)
     extendedResource = v1.ResourceName("example.com/foo")
+    testNode1 = NodeInfo{
+        NodeUsage: NodeUsage{
+            node: &v1.Node{
+                Status: v1.NodeStatus{
+                    Capacity: v1.ResourceList{
+                        v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
+                        v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
+                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
+                    },
+                    Allocatable: v1.ResourceList{
+                        v1.ResourceCPU:    *resource.NewMilliQuantity(1930, resource.DecimalSI),
+                        v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
+                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
+                    },
+                },
+                ObjectMeta: metav1.ObjectMeta{Name: "node1"},
+            },
+            usage: map[v1.ResourceName]*resource.Quantity{
+                v1.ResourceCPU:    resource.NewMilliQuantity(1730, resource.DecimalSI),
+                v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
+                v1.ResourcePods:   resource.NewQuantity(25, resource.BinarySI),
+            },
+        },
+    }
+    testNode2 = NodeInfo{
+        NodeUsage: NodeUsage{
+            node: &v1.Node{
+                Status: v1.NodeStatus{
+                    Capacity: v1.ResourceList{
+                        v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
+                        v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
+                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
+                    },
+                    Allocatable: v1.ResourceList{
+                        v1.ResourceCPU:    *resource.NewMilliQuantity(1930, resource.DecimalSI),
+                        v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
+                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
+                    },
+                },
+                ObjectMeta: metav1.ObjectMeta{Name: "node2"},
+            },
+            usage: map[v1.ResourceName]*resource.Quantity{
+                v1.ResourceCPU:    resource.NewMilliQuantity(1220, resource.DecimalSI),
+                v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
+                v1.ResourcePods:   resource.NewQuantity(11, resource.BinarySI),
+            },
+        },
+    }
+    testNode3 = NodeInfo{
+        NodeUsage: NodeUsage{
+            node: &v1.Node{
+                Status: v1.NodeStatus{
+                    Capacity: v1.ResourceList{
+                        v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
+                        v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
+                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
+                    },
+                    Allocatable: v1.ResourceList{
+                        v1.ResourceCPU:    *resource.NewMilliQuantity(1930, resource.DecimalSI),
+                        v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
+                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
+                    },
+                },
+                ObjectMeta: metav1.ObjectMeta{Name: "node3"},
+            },
+            usage: map[v1.ResourceName]*resource.Quantity{
+                v1.ResourceCPU:    resource.NewMilliQuantity(1530, resource.DecimalSI),
+                v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
+                v1.ResourcePods:   resource.NewQuantity(20, resource.BinarySI),
+            },
+        },
+    }
 )
 func TestValidateThresholds(t *testing.T) {
@@ -156,3 +230,27 @@ func TestResourceUsagePercentages(t *testing.T) {
     t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
 }
+
+func TestSortNodesByUsageDescendingOrder(t *testing.T) {
+    nodeList := []NodeInfo{testNode1, testNode2, testNode3}
+    expectedNodeList := []NodeInfo{testNode3, testNode1, testNode2} // testNode3 has the highest usage
+    sortNodesByUsage(nodeList, false) // ascending=false, sort nodes in descending order
+    for i := 0; i < len(expectedNodeList); i++ {
+        if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
+            t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
+        }
+    }
+}
+
+func TestSortNodesByUsageAscendingOrder(t *testing.T) {
+    nodeList := []NodeInfo{testNode1, testNode2, testNode3}
+    expectedNodeList := []NodeInfo{testNode2, testNode1, testNode3}
+    sortNodesByUsage(nodeList, true) // ascending=true, sort nodes in ascending order
+    for i := 0; i < len(expectedNodeList); i++ {
+        if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
+            t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
+        }
+    }
+}
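A quick sanity check on the expected orderings in these two tests: using the sort key from sortNodesByUsage (memory value + CPU milli-value + pod count), testNode1 scores 3038982964 + 1730 + 25 = 3038984719, testNode2 scores 3038982964 + 1220 + 11 = 3038984195, and testNode3 scores 5038982964 + 1530 + 20 = 5038984514. Descending order is therefore node3, node1, node2 and ascending order is node2, node1, node3, matching expectedNodeList in each test. Assuming these files live in the usual nodeutilization package directory (the file paths are not shown in this view), the new tests can be run with something like go test -run TestSortNodesByUsage ./pkg/descheduler/strategies/nodeutilization/ -v.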