
Limit maximum number of pods to be evicted per node

ravisantoshgudimetla
2018-01-11 22:23:36 +05:30
committed by Jan Chaloupka
parent 0a7f14d75e
commit f1f8b2eaa7
15 changed files with 150 additions and 43 deletions

View File

@@ -42,4 +42,7 @@ type DeschedulerConfiguration struct {
// Node selectors
NodeSelector string
+// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
+MaxNoOfPodsToEvictPerNode int
}

View File

@@ -42,4 +42,7 @@ type DeschedulerConfiguration struct {
// Node selectors
NodeSelector string `json:"nodeSelector,omitempty"`
+// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
+MaxNoOfPodsToEvictPerNode int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
}
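
Taken together, the two hunks above add the per-node eviction cap to both the internal and the v1alpha1 DeschedulerConfiguration types. A minimal, self-contained sketch of a configuration carrying the new field (the struct below reproduces only the two fields shown in the diff; the selector value and the surrounding program are illustrative, not part of the real API package):

package main

import "fmt"

// DeschedulerConfiguration here mirrors just the fields visible in the diff above;
// it is a stand-in for the real type, not the full API object.
type DeschedulerConfiguration struct {
	NodeSelector              string `json:"nodeSelector,omitempty"`
	MaxNoOfPodsToEvictPerNode int    `json:"maxNoOfPodsToEvictPerNode,omitempty"`
}

func main() {
	cfg := DeschedulerConfiguration{
		NodeSelector:              "role=worker", // illustrative selector
		MaxNoOfPodsToEvictPerNode: 2,             // evict at most 2 pods from any single node
	}
	fmt.Printf("nodeSelector=%q maxNoOfPodsToEvictPerNode=%d\n", cfg.NodeSelector, cfg.MaxNoOfPodsToEvictPerNode)
}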

View File

@@ -60,10 +60,11 @@ func Run(rs *options.DeschedulerServer) error {
return nil
}
-strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes)
-strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes)
-strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes)
-strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes)
+nodePodCount := strategies.InitializeNodePodCount(nodes)
+strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
+strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
+strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
+strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
return nil
}

View File

@@ -17,9 +17,10 @@ limitations under the License.
package strategies
import (
-"github.com/golang/glog"
"strings"
+"github.com/golang/glog"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
@@ -35,15 +36,15 @@ type DuplicatePodsMap map[string][]*v1.Pod
// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
+func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
if !strategy.Enabled {
return
}
-deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun)
+deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode)
}
// deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
-func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
+func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
podsEvicted := 0
for _, node := range nodes {
glog.V(1).Infof("Processing node: %#v", node.Name)
@@ -53,16 +54,20 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string,
glog.V(1).Infof("%#v", creator)
// i = 0 does not evict the first pod
for i := 1; i < len(pods); i++ {
+if nodepodCount[node]+1 > maxPodsToEvict {
+break
+}
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
glog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
} else {
-podsEvicted++
+nodepodCount[node]++
glog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
}
}
}
}
+podsEvicted += nodepodCount[node]
}
return podsEvicted
}
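
The guard added here, if nodepodCount[node]+1 > maxPodsToEvict { break }, is the pattern every strategy in this commit repeats: stop evicting from a node once its budget is spent. With this strict comparison a limit of 0 disables eviction entirely, which the updated tests later in the diff rely on. A small self-contained sketch of the same guard, using plain strings in place of *v1.Node so it runs without Kubernetes dependencies:

package main

import "fmt"

// evictedPerNode stands in for nodePodEvictedCount, keyed by node name instead
// of *v1.Node to keep the sketch self-contained.
type evictedPerNode map[string]int

// evictUpToMax applies the per-node budget: it stops as soon as one more
// eviction would exceed maxPodsToEvict, mirroring the break in the diff.
func evictUpToMax(node string, candidates []string, counts evictedPerNode, maxPodsToEvict int) []string {
	var evicted []string
	for _, pod := range candidates {
		if counts[node]+1 > maxPodsToEvict {
			break // budget for this node is exhausted
		}
		counts[node]++ // charge the eviction against the node's budget
		evicted = append(evicted, pod)
	}
	return evicted
}

func main() {
	counts := evictedPerNode{"node1": 0}
	fmt.Println(evictUpToMax("node1", []string{"p1", "p2", "p3"}, counts, 2)) // [p1 p2]
	fmt.Println(evictUpToMax("node1", []string{"p4"}, counts, 2))             // [] - budget already used up
	fmt.Println(evictUpToMax("node2", []string{"p5"}, counts, 0))             // [] - a limit of 0 evicts nothing
}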

View File

@@ -37,11 +37,15 @@ func TestFindDuplicatePods(t *testing.T) {
p5 := test.BuildTestPod("p5", 100, 0, node.Name)
p6 := test.BuildTestPod("p6", 100, 0, node.Name)
p7 := test.BuildTestPod("p7", 100, 0, node.Name)
+p8 := test.BuildTestPod("p8", 100, 0, node.Name)
+p9 := test.BuildTestPod("p9", 100, 0, node.Name)
// All the following pods expect for one will be evicted.
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
+p8.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
+p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
// The following 4 pods won't get evicted.
// A daemonset.
@@ -66,12 +70,14 @@ func TestFindDuplicatePods(t *testing.T) {
expectedEvictedPodCount := 2
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7}}, nil
+return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9}}, nil
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node, nil
})
-podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false)
+npe := nodePodEvictedCount{}
+npe[node] = 0
+podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 2)
if podsEvicted != expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted")
}

View File

@@ -43,7 +43,7 @@ type NodeUsageMap struct {
}
type NodePodsMap map[*v1.Node][]*v1.Pod
-func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
+func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
if !strategy.Enabled {
return
}
@@ -90,7 +90,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
glog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
-totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun)
+totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun, ds.MaxNoOfPodsToEvictPerNode, nodepodCount)
glog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)
}
@@ -151,7 +151,7 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
return lowNodes, targetNodes
}
-func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool) int {
+func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount nodePodEvictedCount) int {
podsEvicted := 0
SortNodesByUsage(targetNodes)
@@ -189,17 +189,17 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
nodeCapacity = node.node.Status.Allocatable
}
glog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
-currentPodsEvicted := podsEvicted
+currentPodsEvicted := nodepodCount[node.node]
// evict best effort pods
-evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
+evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
// evict burstable pods
-evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
+evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
// evict guaranteed pods
-evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
-podsEvictedFromNode := podsEvicted - currentPodsEvicted
-glog.V(1).Infof("%v pods evicted from node %#v with usage %v", podsEvictedFromNode, node.node.Name, node.usage)
+evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
+nodepodCount[node.node] = currentPodsEvicted
+podsEvicted = podsEvicted + nodepodCount[node.node]
+glog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
}
return podsEvicted
}
@@ -214,10 +214,13 @@ func evictPods(inputPods []*v1.Pod,
totalCpu *float64,
totalMem *float64,
podsEvicted *int,
-dryRun bool) {
+dryRun bool, maxPodsToEvict int) {
if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCpu > 0 || *totalMem > 0) {
onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
for _, pod := range inputPods {
+if *podsEvicted+1 > maxPodsToEvict {
+break
+}
cUsage := helper.GetResourceRequest(pod, v1.ResourceCPU)
mUsage := helper.GetResourceRequest(pod, v1.ResourceMemory)
success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
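
LowNodeUtilization threads the counter differently from the other strategies: the node's running total is read out of the shared map into currentPodsEvicted, passed by pointer through the best-effort, burstable and guaranteed passes of evictPods (each guarded by *podsEvicted+1 > maxPodsToEvict), and finally written back. A rough sketch of that read, update, write-back flow, with a plain counter standing in for the real eviction calls:

package main

import "fmt"

// cappedEvict bumps the counter for up to `candidates` pods while the budget
// allows, mirroring the pointer-based guard added to evictPods above.
func cappedEvict(podsEvicted *int, candidates int, maxPodsToEvict int) {
	for i := 0; i < candidates; i++ {
		if *podsEvicted+1 > maxPodsToEvict {
			break
		}
		*podsEvicted++
	}
}

func main() {
	nodePodCount := map[string]int{"node1": 0}

	// Read the node's running total once, then share it across the three passes.
	current := nodePodCount["node1"]
	cappedEvict(&current, 2, 3) // best-effort pods
	cappedEvict(&current, 2, 3) // burstable pods
	cappedEvict(&current, 2, 3) // guaranteed pods

	// Write the total back so later strategies see the evictions made here.
	nodePodCount["node1"] = current
	fmt.Println(nodePodCount["node1"]) // 3 - capped even though 6 candidates were eligible
}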

View File

@@ -109,15 +109,19 @@ func TestLowNodeUtilization(t *testing.T) {
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
-expectedPodsEvicted := 4
+expectedPodsEvicted := 3
npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
}
-podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false)
+npe := nodePodEvictedCount{}
+npe[n1] = 0
+npe[n2] = 0
+npe[n3] = 0
+podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
if expectedPodsEvicted != podsEvicted {
-t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted)
+t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
}
}

View File

@@ -26,12 +26,12 @@ import (
"k8s.io/api/core/v1"
)
-func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
-evictionCount := removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes)
+func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
+evictionCount := removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
glog.V(1).Infof("Evicted %v pods", evictionCount)
}
-func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) int {
+func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
evictedPodCount := 0
if !strategy.Enabled {
return evictedPodCount
@@ -51,15 +51,19 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
}
for _, pod := range pods {
+if nodepodCount[node]+1 > maxPodsToEvict {
+break
+}
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
glog.V(1).Infof("Evicting pod: %v", pod.Name)
evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, false)
-evictedPodCount++
+nodepodCount[node]++
}
}
}
+evictedPodCount += nodepodCount[node]
}
default:
glog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)

View File

@@ -92,6 +92,8 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
pods []v1.Pod
strategy api.DeschedulerStrategy
expectedEvictedPodCount int
+npe nodePodEvictedCount
+maxPodsToEvict int
}{
{
description: "Strategy disabled, should not evict any pods",
@@ -104,8 +106,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
},
},
expectedEvictedPodCount: 0,
-pods: addPodsToNode(nodeWithoutLabels),
-nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+pods: addPodsToNode(nodeWithoutLabels),
+nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+maxPodsToEvict: 0,
},
{
description: "Invalid strategy type, should not evict any pods",
@@ -118,15 +122,19 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
},
},
expectedEvictedPodCount: 0,
-pods: addPodsToNode(nodeWithoutLabels),
-nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+pods: addPodsToNode(nodeWithoutLabels),
+nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+maxPodsToEvict: 0,
},
{
description: "Pod is correctly scheduled on node, no eviction expected",
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
expectedEvictedPodCount: 0,
-pods: addPodsToNode(nodeWithLabels),
-nodes: []*v1.Node{nodeWithLabels},
+pods: addPodsToNode(nodeWithLabels),
+nodes: []*v1.Node{nodeWithLabels},
+npe: nodePodEvictedCount{nodeWithLabels: 0},
+maxPodsToEvict: 0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
@@ -134,6 +142,17 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+maxPodsToEvict: 1,
},
+{
+description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 0, should not be evicted",
+expectedEvictedPodCount: 0,
+strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
+pods: addPodsToNode(nodeWithoutLabels),
+nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+maxPodsToEvict: 0,
+},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
@@ -141,6 +160,8 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
+npe: nodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
+maxPodsToEvict: 0,
},
}
@@ -155,7 +176,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
Client: fakeClient,
}
-actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes)
+actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict)
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}

View File

@@ -31,15 +31,15 @@ import (
)
// RemovePodsViolatingInterPodAntiAffinity with elimination strategy
-func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
+func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
if !strategy.Enabled {
return
}
-removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun)
+removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
}
// removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules.
-func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
+func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int) int {
podsEvicted := 0
for _, node := range nodes {
glog.V(1).Infof("Processing node: %#v\n", node.Name)
@@ -49,12 +49,15 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
}
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
+if nodePodCount[node]+1 > maxPodsToEvict {
+break
+}
if checkPodsWithAntiAffinityExist(pods[i], pods) {
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
glog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
} else {
-podsEvicted++
+nodePodCount[node]++
glog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err)
// Since the current pod is evicted all other pods which have anti-affinity with this
// pod need not be evicted.
@@ -65,6 +68,7 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
}
}
}
+podsEvicted += nodePodCount[node]
}
return podsEvicted
}

View File

@@ -79,8 +79,15 @@ func TestPodAntiAffinity(t *testing.T) {
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node, nil
})
-expectedEvictedPodCount := 1
-podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false)
+npe := nodePodEvictedCount{}
+npe[node] = 0
+expectedEvictedPodCount := 0
+podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0)
if podsEvicted != expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted")
}
+expectedEvictedPodCount = 1
+podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1)
+if podsEvicted != expectedEvictedPodCount {
+t.Errorf("Unexpected no of pods evicted")
+}

View File

@@ -0,0 +1,37 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"k8s.io/api/core/v1"
)
// This file contains the datastructures, types & functions needed by all the strategies so that we don't have
// to compute them again in each strategy.
// nodePodEvictedCount keeps count of pods evicted on node. This is used in conjunction with strategies to limit the number of pods evicted per node.
type nodePodEvictedCount map[*v1.Node]int
// InitializeNodePodCount initializes the nodePodCount.
func InitializeNodePodCount(nodeList []*v1.Node) nodePodEvictedCount {
var nodePodCount = make(nodePodEvictedCount)
for _, node := range nodeList {
// Initialize podsEvicted till now with 0.
nodePodCount[node] = 0
}
return nodePodCount
}
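
For context on how this new helper is meant to be used: Run() (earlier in the diff) calls InitializeNodePodCount once and hands the same map to every strategy, so the cap is enforced across the whole descheduling pass rather than per strategy. A runnable sketch of that sharing, with a tiny stand-in type replacing *v1.Node so it compiles on its own:

package main

import "fmt"

// node is a stand-in for *v1.Node so the sketch compiles without client-go;
// the real map in the file above is keyed by *v1.Node pointers.
type node struct{ name string }

type nodePodEvictedCount map[*node]int

// initializeNodePodCount mirrors InitializeNodePodCount above: every node
// starts with zero evictions recorded.
func initializeNodePodCount(nodeList []*node) nodePodEvictedCount {
	counts := make(nodePodEvictedCount)
	for _, n := range nodeList {
		counts[n] = 0
	}
	return counts
}

func main() {
	n1, n2 := &node{"node1"}, &node{"node2"}
	counts := initializeNodePodCount([]*node{n1, n2})

	// Because every strategy receives this same map, evictions accumulate:
	counts[n1] += 2 // e.g. RemoveDuplicates evicted 2 pods from node1
	counts[n1]++    // a later strategy evicted 1 more
	fmt.Printf("node1=%d node2=%d\n", counts[n1], counts[n2]) // node1=3 node2=0
}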