From 6dbc8a1fcc3f7693560086222e595054a734cc32 Mon Sep 17 00:00:00 2001
From: ravisantoshgudimetla
Date: Mon, 27 Nov 2017 15:28:30 -0500
Subject: [PATCH] Changes to fix low node utilization strategy

---
 pkg/descheduler/node/node.go                  | 10 +++++++
 pkg/descheduler/node/node_test.go             | 30 +++++++++++++++++++
 .../strategies/lownodeutilization.go          | 17 ++++++-----
 .../strategies/lownodeutilization_test.go     | 15 ++++++++--
 4 files changed, 62 insertions(+), 10 deletions(-)

diff --git a/pkg/descheduler/node/node.go b/pkg/descheduler/node/node.go
index bb4d915f8..44c6dc91e 100644
--- a/pkg/descheduler/node/node.go
+++ b/pkg/descheduler/node/node.go
@@ -86,6 +86,7 @@ func GetNodeLister(client clientset.Interface, stopChannel <-chan struct{}) core
 	return nodeLister
 }
 
+// IsReady checks whether the descheduler can run against the given node.
 func IsReady(node *v1.Node) bool {
 	for i := range node.Status.Conditions {
 		cond := &node.Status.Conditions[i]
@@ -111,3 +112,12 @@ func IsReady(node *v1.Node) bool {
 	}*/
 	return true
 }
+
+// IsNodeUschedulable checks whether the node is unschedulable. It is a helper used when
+// collecting underutilized nodes so that unschedulable nodes are not counted among them.
+func IsNodeUschedulable(node *v1.Node) bool {
+	if node.Spec.Unschedulable {
+		return true
+	}
+	return false
+}
diff --git a/pkg/descheduler/node/node_test.go b/pkg/descheduler/node/node_test.go
index f33bda04d..cb444559e 100644
--- a/pkg/descheduler/node/node_test.go
+++ b/pkg/descheduler/node/node_test.go
@@ -72,3 +72,33 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
 		t.Errorf("Expected node1, got %s", nodes[0].Name)
 	}
 }
+
+func TestIsNodeUschedulable(t *testing.T) {
+	tests := []struct {
+		description     string
+		node            *v1.Node
+		IsUnSchedulable bool
+	}{
+		{
+			description: "Node is expected to be schedulable",
+			node: &v1.Node{
+				Spec: v1.NodeSpec{Unschedulable: false},
+			},
+			IsUnSchedulable: false,
+		},
+		{
+			description: "Node is not expected to be schedulable because of the unschedulable field",
+			node: &v1.Node{
+				Spec: v1.NodeSpec{Unschedulable: true},
+			},
+			IsUnSchedulable: true,
+		},
+	}
+	for _, test := range tests {
+		actualUnSchedulable := IsNodeUschedulable(test.node)
+		if actualUnSchedulable != test.IsUnSchedulable {
+			t.Errorf("Test %#v failed", test.description)
+		}
+	}
+
+}
diff --git a/pkg/descheduler/strategies/lownodeutilization.go b/pkg/descheduler/strategies/lownodeutilization.go
index bfe6d1587..998de5b55 100644
--- a/pkg/descheduler/strategies/lownodeutilization.go
+++ b/pkg/descheduler/strategies/lownodeutilization.go
@@ -28,6 +28,7 @@ import (
 	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
 	"github.com/kubernetes-incubator/descheduler/pkg/api"
 	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
+	nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
 	podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
 )
 
@@ -58,7 +59,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
 	}
 
 	npm := CreateNodePodsMap(ds.Client, nodes)
-	lowNodes, targetNodes, _ := classifyNodes(npm, thresholds, targetThresholds)
+	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
 
 	if len(lowNodes) == 0 {
 		glog.V(1).Infof("No node is underutilized")
@@ -109,25 +110,25 @@ func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
 	return true
 }
 
-func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap, []NodeUsageMap) {
-	lowNodes, targetNodes, otherNodes := []NodeUsageMap{}, []NodeUsageMap{}, []NodeUsageMap{}
+// classifyNodes classifies the nodes into low-utilization and high-utilization nodes. A node that
+// lies between the low and high thresholds is simply ignored.
+func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap) {
+	lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
 	for node, pods := range npm {
 		usage, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods)
 		nuMap := NodeUsageMap{node, usage, nonRemovablePods, bePods, bPods, gPods}
 		glog.V(1).Infof("Node %#v usage: %#v", node.Name, usage)
 
-		if IsNodeWithLowUtilization(usage, thresholds) {
+		// Check whether the node is underutilized and pods can still be scheduled on it.
+		if IsNodeWithLowUtilization(usage, thresholds) && !nodeutil.IsNodeUschedulable(node) {
 			glog.V(1).Infof("Identified underutilized node: %v", node.ObjectMeta.Name)
 			lowNodes = append(lowNodes, nuMap)
 		} else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
 			glog.V(1).Infof("Identified target node: %v", node.ObjectMeta.Name)
 			targetNodes = append(targetNodes, nuMap)
-		} else {
-			// Seems we don't need to collect them?
-			otherNodes = append(otherNodes, nuMap)
 		}
 	}
-	return lowNodes, targetNodes, otherNodes
+	return lowNodes, targetNodes
 }
 
 func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool) int {
diff --git a/pkg/descheduler/strategies/lownodeutilization_test.go b/pkg/descheduler/strategies/lownodeutilization_test.go
index 1718b2c1a..849177197 100644
--- a/pkg/descheduler/strategies/lownodeutilization_test.go
+++ b/pkg/descheduler/strategies/lownodeutilization_test.go
@@ -41,6 +41,9 @@ func TestLowNodeUtilization(t *testing.T) {
 
 	n1 := test.BuildTestNode("n1", 4000, 3000, 9)
 	n2 := test.BuildTestNode("n2", 4000, 3000, 10)
+	n3 := test.BuildTestNode("n3", 4000, 3000, 10)
+	// Mark node n3 unschedulable so that it is not counted in the underutilized nodes list.
+	n3.Spec.Unschedulable = true
 	p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
 	p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
 	p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
@@ -89,6 +92,9 @@ func TestLowNodeUtilization(t *testing.T) {
 		if strings.Contains(fieldString, "n2") {
 			return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
 		}
+		if strings.Contains(fieldString, "n3") {
+			return true, &v1.PodList{Items: []v1.Pod{}}, nil
+		}
 		return true, nil, fmt.Errorf("Failed to list: %v", list)
 	})
 	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
@@ -98,12 +104,17 @@ func TestLowNodeUtilization(t *testing.T) {
 		getAction := action.(core.GetAction)
 		switch getAction.GetName() {
 		case n1.Name:
 			return true, n1, nil
 		case n2.Name:
 			return true, n2, nil
+		case n3.Name:
+			return true, n3, nil
 		}
 		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
 	})
 	expectedPodsEvicted := 4
-	npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2})
-	lowNodes, targetNodes, _ := classifyNodes(npm, thresholds, targetThresholds)
+	npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
+	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
+	if len(lowNodes) != 1 {
+		t.Errorf("After ignoring unschedulable nodes, expected only one node to be underutilized.")
+	}
 	podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false)
 	if expectedPodsEvicted != podsEvicted {
 		t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted)
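
Illustrative note (not part of the patch): a minimal standalone sketch of the behavior the new check relies on, assuming the k8s.io/api/core/v1 types and the nodeutil package touched above. A cordoned node reports Unschedulable=true, so classifyNodes, which now returns only (lowNodes, targetNodes), skips it when collecting underutilized nodes and it is never picked as a destination for evicted pods.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"

	nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
)

func main() {
	// A cordoned node: IsNodeUschedulable reports true, so even if its usage is
	// below the low thresholds, classifyNodes will not add it to lowNodes.
	cordoned := &v1.Node{Spec: v1.NodeSpec{Unschedulable: true}}

	// A regular node remains a candidate for the underutilized list.
	regular := &v1.Node{Spec: v1.NodeSpec{Unschedulable: false}}

	for _, n := range []*v1.Node{cordoned, regular} {
		// Prints "unschedulable=true" for the cordoned node, "unschedulable=false" otherwise.
		fmt.Printf("unschedulable=%v\n", nodeutil.IsNodeUschedulable(n))
	}
}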