mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00

Remove createNodePodsMap
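The change drops the intermediate NodePodsMap: instead of building a node-to-pods map up front with createNodePodsMap and handing it to classifyNodes, LowNodeUtilization now passes the context, client, and node list straight to classifyNodes, which lists each node's pods itself and skips any node whose pods cannot be fetched.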
@@ -95,8 +95,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 		targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
 	}
 
-	npm := createNodePodsMap(ctx, client, nodes)
-	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
+	lowNodes, targetNodes := classifyNodes(ctx, client, nodes, thresholds, targetThresholds)
 
 	klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
 		thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
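At the call site, the npm map and the createNodePodsMap call go away; classifyNodes now receives ctx, client, and the node list directly and does the pod listing itself (see the next hunk).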
@@ -183,9 +182,16 @@ func validateThresholds(thresholds api.ResourceThresholds) error {
 
 // classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
 // low and high thresholds, it is simply ignored.
-func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap) {
+func classifyNodes(ctx context.Context, client clientset.Interface, nodes []*v1.Node, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap) {
 	lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
-	for node, pods := range npm {
+	for _, node := range nodes {
+		pods, err := podutil.ListPodsOnANode(ctx, client, node)
+		if err != nil {
+			klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
+			continue
+		}
+
 		usage := nodeUtilization(node, pods)
 		nuMap := NodeUsageMap{
 			node: node,
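To make the new control flow easy to see in isolation, here is a minimal, self-contained sketch of the pattern classifyNodes now follows. The Node, Pod, listPodsOnNode, and nodeUsage names and the threshold comparison are simplified stand-ins invented for this illustration, not descheduler's actual types or helpers; only the list-inside-the-loop and continue-on-error flow mirrors the diff.

// Sketch of the per-node listing pattern introduced by this change.
// Node, Pod, listPodsOnNode, and nodeUsage are simplified stand-ins for
// the descheduler types and helpers; only the control flow mirrors the diff.
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
)

type Node struct{ Name string }
type Pod struct{ Name string }

// listPodsOnNode stands in for podutil.ListPodsOnANode.
func listPodsOnNode(ctx context.Context, node *Node) ([]*Pod, error) {
	if node.Name == "broken-node" {
		return nil, errors.New("API error")
	}
	return []*Pod{{Name: "pod-on-" + node.Name}}, nil
}

// nodeUsage stands in for nodeUtilization: derive a usage figure from the pods.
func nodeUsage(node *Node, pods []*Pod) int { return len(pods) }

// classify mirrors the new classifyNodes flow: list pods per node inside the
// loop and skip (continue) any node whose pods cannot be fetched, instead of
// building a node-to-pods map up front.
func classify(ctx context.Context, nodes []*Node, low, high int) (lowNodes, highNodes []*Node) {
	for _, node := range nodes {
		pods, err := listPodsOnNode(ctx, node)
		if err != nil {
			log.Printf("node %s will not be processed, error in accessing its pods (%v)", node.Name, err)
			continue
		}

		usage := nodeUsage(node, pods)
		switch {
		case usage <= low:
			lowNodes = append(lowNodes, node)
		case usage >= high:
			highNodes = append(highNodes, node)
		}
	}
	return lowNodes, highNodes
}

func main() {
	nodes := []*Node{{Name: "node-a"}, {Name: "broken-node"}, {Name: "node-b"}}
	low, high := classify(context.Background(), nodes, 0, 1)
	fmt.Println("low:", len(low), "high:", len(high))
}

In this sketch, as in the real change, an error while listing a node's pods only drops that one node from classification rather than aborting the whole pass.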
@@ -342,20 +348,6 @@ func sortNodesByUsage(nodes []NodeUsageMap) {
 	})
 }
 
-// createNodePodsMap returns nodepodsmap with evictable pods on node.
-func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {
-	npm := NodePodsMap{}
-	for _, node := range nodes {
-		pods, err := podutil.ListPodsOnANode(ctx, client, node)
-		if err != nil {
-			klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
-		} else {
-			npm[node] = pods
-		}
-	}
-	return npm
-}
-
 // isNodeAboveTargetUtilization checks if a node is overutilized
 func isNodeAboveTargetUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
 	for name, nodeValue := range nodeThresholds {
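With the listing moved into classifyNodes, createNodePodsMap and its NodePodsMap result have no remaining caller in this strategy, so the helper is deleted outright. Behaviour for unreachable nodes appears unchanged: previously such a node was simply left out of the map, now it is skipped with continue, and the same warning is logged in both cases.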