Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-25 20:59:28 +01:00)
Merge pull request #45 from ravisantoshgudimetla/lownodeutilization-fix#43
Changes to fix low node utilization strategy
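For context, the core of the fix below is that a node should only be treated as an eviction destination (a "low node") when it is both under the configured utilization thresholds and actually schedulable. The sketch that follows is a minimal, self-contained illustration of that rule, not the descheduler's own code; the simplified threshold check and the k8s.io/api/core/v1 import path are assumptions made for the example.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1" // assumed import path for the core API types
)

// isUnderThresholds stands in for the real IsNodeWithLowUtilization check.
func isUnderThresholds(cpuPercent, memPercent, cpuThreshold, memThreshold float64) bool {
	return cpuPercent < cpuThreshold && memPercent < memThreshold
}

// qualifiesAsLowNode mirrors the combined condition this PR adds to classifyNodes:
// under-utilized AND not marked unschedulable.
func qualifiesAsLowNode(node *v1.Node, cpuPercent, memPercent float64) bool {
	return isUnderThresholds(cpuPercent, memPercent, 20, 20) && !node.Spec.Unschedulable
}

func main() {
	cordoned := &v1.Node{Spec: v1.NodeSpec{Unschedulable: true}}
	idle := &v1.Node{Spec: v1.NodeSpec{Unschedulable: false}}
	fmt.Println(qualifiesAsLowNode(cordoned, 5, 5)) // false: cordoned nodes are skipped
	fmt.Println(qualifiesAsLowNode(idle, 5, 5))     // true: a valid destination for evicted pods
}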
@@ -86,6 +86,7 @@ func GetNodeLister(client clientset.Interface, stopChannel <-chan struct{}) core
 	return nodeLister
 }
 
+// IsReady checks if the descheduler could run against given node.
 func IsReady(node *v1.Node) bool {
 	for i := range node.Status.Conditions {
 		cond := &node.Status.Conditions[i]
@@ -111,3 +112,12 @@ func IsReady(node *v1.Node) bool {
 	}*/
 	return true
 }
+
+// IsNodeUschedulable checks if the node is unschedulable. This is helper function to check only in case of
+// underutilized node so that they won't be accounted for.
+func IsNodeUschedulable(node *v1.Node) bool {
+	if node.Spec.Unschedulable {
+		return true
+	}
+	return false
+}
@@ -72,3 +72,33 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
 		t.Errorf("Expected node1, got %s", nodes[0].Name)
 	}
 }
+
+func TestIsNodeUschedulable(t *testing.T) {
+	tests := []struct {
+		description     string
+		node            *v1.Node
+		IsUnSchedulable bool
+	}{
+		{
+			description: "Node is expected to be schedulable",
+			node: &v1.Node{
+				Spec: v1.NodeSpec{Unschedulable: false},
+			},
+			IsUnSchedulable: false,
+		},
+		{
+			description: "Node is not expected to be schedulable because of unschedulable field",
+			node: &v1.Node{
+				Spec: v1.NodeSpec{Unschedulable: true},
+			},
+			IsUnSchedulable: true,
+		},
+	}
+	for _, test := range tests {
+		actualUnSchedulable := IsNodeUschedulable(test.node)
+		if actualUnSchedulable != test.IsUnSchedulable {
+			t.Errorf("Test %#v failed", test.description)
+		}
+	}
+
+}
@@ -28,6 +28,7 @@ import (
 	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
 	"github.com/kubernetes-incubator/descheduler/pkg/api"
 	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
+	nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
 	podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
 )
 
@@ -58,7 +59,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
 	}
 
 	npm := CreateNodePodsMap(ds.Client, nodes)
-	lowNodes, targetNodes, _ := classifyNodes(npm, thresholds, targetThresholds)
+	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
 
 	if len(lowNodes) == 0 {
 		glog.V(1).Infof("No node is underutilized")
@@ -109,25 +110,25 @@ func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
 	return true
 }
 
-func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap, []NodeUsageMap) {
-	lowNodes, targetNodes, otherNodes := []NodeUsageMap{}, []NodeUsageMap{}, []NodeUsageMap{}
+// classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
+// low and high thresholds, it is simply ignored.
+func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap) {
+	lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
 	for node, pods := range npm {
 		usage, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods)
 		nuMap := NodeUsageMap{node, usage, nonRemovablePods, bePods, bPods, gPods}
 		glog.V(1).Infof("Node %#v usage: %#v", node.Name, usage)
 
-		if IsNodeWithLowUtilization(usage, thresholds) {
+		// Check if node is underutilized and if we can schedule pods on it.
+		if IsNodeWithLowUtilization(usage, thresholds) && !nodeutil.IsNodeUschedulable(node) {
 			glog.V(1).Infof("Identified underutilized node: %v", node.ObjectMeta.Name)
 			lowNodes = append(lowNodes, nuMap)
 		} else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
 			glog.V(1).Infof("Identified target node: %v", node.ObjectMeta.Name)
 			targetNodes = append(targetNodes, nuMap)
-		} else {
-			// Seems we don't need to collect them?
-			otherNodes = append(otherNodes, nuMap)
 		}
 	}
-	return lowNodes, targetNodes, otherNodes
+	return lowNodes, targetNodes
 }
 
 func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool) int {
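Since classifyNodes now returns only two slices and silently drops everything in between, a simplified sketch of the new control flow may help. The nodeUsage type and the single CPU percentage below are hypothetical stand-ins for the real NodeUsageMap and ResourceThresholds; this is an illustration of the two-bucket design, not the descheduler's implementation.

package main

import "fmt"

type nodeUsage struct {
	name          string
	cpuPercent    float64
	unschedulable bool
}

func classify(nodes []nodeUsage, lowThreshold, targetThreshold float64) (low, target []nodeUsage) {
	for _, n := range nodes {
		switch {
		case n.cpuPercent < lowThreshold && !n.unschedulable:
			low = append(low, n) // candidate destination for evicted pods
		case n.cpuPercent > targetThreshold:
			target = append(target, n) // pods will be evicted from here
		}
		// Nodes between the thresholds, and unschedulable under-utilized nodes,
		// are intentionally dropped instead of being returned as a third slice.
	}
	return low, target
}

func main() {
	nodes := []nodeUsage{
		{name: "n1", cpuPercent: 90},
		{name: "n2", cpuPercent: 10},
		{name: "n3", cpuPercent: 0, unschedulable: true},
	}
	low, target := classify(nodes, 20, 70)
	fmt.Println(len(low), len(target)) // 1 1: n3 is skipped, mirroring the test below
}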
@@ -41,6 +41,9 @@ func TestLowNodeUtilization(t *testing.T) {
 
 	n1 := test.BuildTestNode("n1", 4000, 3000, 9)
 	n2 := test.BuildTestNode("n2", 4000, 3000, 10)
+	n3 := test.BuildTestNode("n3", 4000, 3000, 10)
+	// Making n3 node unschedulable so that it won't counted in lowUtilized nodes list.
+	n3.Spec.Unschedulable = true
 	p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
 	p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
 	p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
@@ -89,6 +92,9 @@ func TestLowNodeUtilization(t *testing.T) {
 		if strings.Contains(fieldString, "n2") {
 			return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
 		}
+		if strings.Contains(fieldString, "n3") {
+			return true, &v1.PodList{Items: []v1.Pod{}}, nil
+		}
 		return true, nil, fmt.Errorf("Failed to list: %v", list)
 	})
 	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
@@ -98,12 +104,17 @@ func TestLowNodeUtilization(t *testing.T) {
 			return true, n1, nil
 		case n2.Name:
 			return true, n2, nil
+		case n3.Name:
+			return true, n3, nil
 		}
 		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
 	})
 	expectedPodsEvicted := 4
-	npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2})
-	lowNodes, targetNodes, _ := classifyNodes(npm, thresholds, targetThresholds)
+	npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
+	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
+	if len(lowNodes) != 1 {
+		t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
+	}
 	podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false)
 	if expectedPodsEvicted != podsEvicted {
 		t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)