From 50dd3b8971a77f5ad3be0a5b21816844a1575a95 Mon Sep 17 00:00:00 2001
From: Jan Chaloupka
Date: Fri, 7 Mar 2025 13:10:17 +0100
Subject: [PATCH] ReferencedResourceList: alias for
 map[v1.ResourceName]*resource.Quantity to avoid duplicating the type
 definition

---
 pkg/api/types.go                                   |  4 +++
 .../metricscollector/metricscollector.go           | 17 +++++++------
 .../metricscollector/metricscollector_test.go      |  3 ++-
 pkg/descheduler/node/node.go                       |  9 ++++---
 .../nodeutilization/highnodeutilization.go         |  3 +--
 .../nodeutilization/lownodeutilization.go          |  3 +--
 .../nodeutilization/nodeutilization.go             | 22 ++++++++--------
 .../nodeutilization/nodeutilization_test.go        | 15 +++++------
 .../plugins/nodeutilization/usageclients.go        | 25 ++++++++++---------
 9 files changed, 54 insertions(+), 47 deletions(-)
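Editor's note (placed below the `---` cut line, where `git am` ignores free-form text; not part of the change itself): the new alias keeps the familiar v1.ResourceList shape but stores `*resource.Quantity` values, so callers can mutate a quantity in place and share it between maps instead of copying it back. A minimal, self-contained sketch of how the aliased type behaves — the alias is re-declared locally here purely for illustration; in the patch the real definition lives in pkg/api/types.go:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// ReferencedResourceList mirrors the alias added in pkg/api/types.go:
// a v1.ResourceList-like map whose values are *resource.Quantity, so
// entries can be mutated in place without copying the whole map.
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity

func main() {
	usage := ReferencedResourceList{
		v1.ResourceCPU:    resource.NewMilliQuantity(500, resource.DecimalSI),
		v1.ResourceMemory: resource.NewQuantity(2<<30, resource.BinarySI),
	}

	// Because the values are pointers, Sub updates the stored quantity directly.
	usage[v1.ResourceCPU].Sub(*resource.NewMilliQuantity(200, resource.DecimalSI))

	fmt.Println(usage[v1.ResourceCPU].MilliValue()) // 300
	fmt.Println(usage[v1.ResourceMemory].Value())   // 2147483648
}
```

Because `ReferencedResourceList` is declared with `=` (a type alias, not a new named type), values typed as `map[v1.ResourceName]*resource.Quantity` and as `api.ReferencedResourceList` are interchangeable without conversion, which is what lets the patch swap signatures file by file without touching every caller at once.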
diff --git a/pkg/api/types.go b/pkg/api/types.go
index 1f2ce2aeb..f3edce0c4 100644
--- a/pkg/api/types.go
+++ b/pkg/api/types.go
@@ -18,6 +18,7 @@ package api
 
 import (
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )
@@ -104,3 +105,6 @@ type MetricsCollector struct {
 	// Later, the collection can be extended to other providers.
 	Enabled bool
 }
+
+// ReferencedResourceList is an adaptation of v1.ResourceList with resources as references
+type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity
diff --git a/pkg/descheduler/metricscollector/metricscollector.go b/pkg/descheduler/metricscollector/metricscollector.go
index 5c49cec11..f577e9f3e 100644
--- a/pkg/descheduler/metricscollector/metricscollector.go
+++ b/pkg/descheduler/metricscollector/metricscollector.go
@@ -32,6 +32,7 @@ import (
 	"k8s.io/klog/v2"
 	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
 	utilptr "k8s.io/utils/ptr"
+	"sigs.k8s.io/descheduler/pkg/api"
 )
 
 const (
@@ -43,7 +44,7 @@ type MetricsCollector struct {
 	metricsClientset metricsclient.Interface
 	nodeSelector     labels.Selector
 
-	nodes map[string]map[v1.ResourceName]*resource.Quantity
+	nodes map[string]api.ReferencedResourceList
 
 	mu sync.RWMutex
 	// hasSynced signals at least one sync succeeded
@@ -55,7 +56,7 @@ func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset me
 		nodeLister:       nodeLister,
 		metricsClientset: metricsClientset,
 		nodeSelector:     nodeSelector,
-		nodes:            make(map[string]map[v1.ResourceName]*resource.Quantity),
+		nodes:            make(map[string]api.ReferencedResourceList),
 	}
 }
 
@@ -77,13 +78,13 @@ func weightedAverage(prevValue, value int64) int64 {
 	return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
 }
 
-func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*resource.Quantity, error) {
+func (mc *MetricsCollector) AllNodesUsage() (map[string]api.ReferencedResourceList, error) {
 	mc.mu.RLock()
 	defer mc.mu.RUnlock()
 
-	allNodesUsage := make(map[string]map[v1.ResourceName]*resource.Quantity)
+	allNodesUsage := make(map[string]api.ReferencedResourceList)
 	for nodeName := range mc.nodes {
-		allNodesUsage[nodeName] = map[v1.ResourceName]*resource.Quantity{
+		allNodesUsage[nodeName] = api.ReferencedResourceList{
 			v1.ResourceCPU:    utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
 			v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
 		}
@@ -92,7 +93,7 @@ func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*res
 	return allNodesUsage, nil
 }
 
-func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resource.Quantity, error) {
+func (mc *MetricsCollector) NodeUsage(node *v1.Node) (api.ReferencedResourceList, error) {
 	mc.mu.RLock()
 	defer mc.mu.RUnlock()
 
@@ -100,7 +101,7 @@ func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resou
 		klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node))
 		return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
 	}
-	return map[v1.ResourceName]*resource.Quantity{
+	return api.ReferencedResourceList{
 		v1.ResourceCPU:    utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
 		v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
 	}, nil
@@ -131,7 +132,7 @@ func (mc *MetricsCollector) Collect(ctx context.Context) error {
 		}
 
 		if _, exists := mc.nodes[node.Name]; !exists {
-			mc.nodes[node.Name] = map[v1.ResourceName]*resource.Quantity{
+			mc.nodes[node.Name] = api.ReferencedResourceList{
 				v1.ResourceCPU:    utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
 				v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
 			}
diff --git a/pkg/descheduler/metricscollector/metricscollector_test.go b/pkg/descheduler/metricscollector/metricscollector_test.go
index c1477172e..f369681f8 100644
--- a/pkg/descheduler/metricscollector/metricscollector_test.go
+++ b/pkg/descheduler/metricscollector/metricscollector_test.go
@@ -29,10 +29,11 @@ import (
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 	fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
 
+	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/test"
 )
 
-func checkCpuNodeUsage(t *testing.T, usage map[v1.ResourceName]*resource.Quantity, millicpu int64) {
+func checkCpuNodeUsage(t *testing.T, usage api.ReferencedResourceList, millicpu int64) {
 	t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
 	if usage[v1.ResourceCPU].MilliValue() != millicpu {
 		t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())
diff --git a/pkg/descheduler/node/node.go b/pkg/descheduler/node/node.go
index 976ae0a64..b43cb5b19 100644
--- a/pkg/descheduler/node/node.go
+++ b/pkg/descheduler/node/node.go
@@ -30,6 +30,7 @@ import (
 	listersv1 "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
+	"sigs.k8s.io/descheduler/pkg/api"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
@@ -244,7 +245,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
 }
 
 // nodeAvailableResources returns resources mapped to the quanitity available on the node.
-func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
+func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
 	podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
 	if err != nil {
 		return nil, err
@@ -253,7 +254,7 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
 	if err != nil {
 		return nil, err
 	}
-	remainingResources := map[v1.ResourceName]*resource.Quantity{
+	remainingResources := api.ReferencedResourceList{
 		v1.ResourceCPU:    resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
 		v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
 		v1.ResourcePods:   resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
@@ -273,8 +274,8 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
 }
 
 // NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
-func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
-	totalUtilization := map[v1.ResourceName]*resource.Quantity{
+func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
+	totalUtilization := api.ReferencedResourceList{
 		v1.ResourceCPU:    resource.NewMilliQuantity(0, resource.DecimalSI),
 		v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
 		v1.ResourcePods:   resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
diff --git a/pkg/framework/plugins/nodeutilization/highnodeutilization.go b/pkg/framework/plugins/nodeutilization/highnodeutilization.go
index dcfa44bdf..297567071 100644
--- a/pkg/framework/plugins/nodeutilization/highnodeutilization.go
+++ b/pkg/framework/plugins/nodeutilization/highnodeutilization.go
@@ -21,7 +21,6 @@ import (
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/descheduler/pkg/api"
@@ -138,7 +137,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 	}
 
 	// stop if the total available usage has dropped to zero - no more pods can be scheduled
-	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
+	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
 		for name := range totalAvailableUsage {
 			if totalAvailableUsage[name].CmpInt64(0) < 1 {
 				return false
diff --git a/pkg/framework/plugins/nodeutilization/lownodeutilization.go b/pkg/framework/plugins/nodeutilization/lownodeutilization.go
index 8abb48465..8c12beccc 100644
--- a/pkg/framework/plugins/nodeutilization/lownodeutilization.go
+++ b/pkg/framework/plugins/nodeutilization/lownodeutilization.go
@@ -21,7 +21,6 @@ import (
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/klog/v2"
 
@@ -168,7 +167,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 	}
 
 	// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
-	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
+	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
 		if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
 			return false
 		}
diff --git a/pkg/framework/plugins/nodeutilization/nodeutilization.go b/pkg/framework/plugins/nodeutilization/nodeutilization.go
index a694cacc8..9750b7b6b 100644
--- a/pkg/framework/plugins/nodeutilization/nodeutilization.go
+++ b/pkg/framework/plugins/nodeutilization/nodeutilization.go
@@ -37,13 +37,13 @@ import (
 // NodeUsage stores a node's info, pods on it, thresholds and its resource usage
 type NodeUsage struct {
 	node    *v1.Node
-	usage   map[v1.ResourceName]*resource.Quantity
+	usage   api.ReferencedResourceList
 	allPods []*v1.Pod
 }
 
 type NodeThresholds struct {
-	lowResourceThreshold  map[v1.ResourceName]*resource.Quantity
-	highResourceThreshold map[v1.ResourceName]*resource.Quantity
+	lowResourceThreshold  api.ReferencedResourceList
+	highResourceThreshold api.ReferencedResourceList
 }
 
 type NodeInfo struct {
@@ -51,7 +51,7 @@ type NodeInfo struct {
 	thresholds NodeThresholds
 }
 
-type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool
+type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool
 
 // NodePodsMap is a set of (node, pods) pairs
 type NodePodsMap map[*v1.Node][]*v1.Pod
@@ -94,8 +94,8 @@ func getNodeThresholds(
 		}
 
 		nodeThresholdsMap[node.Name] = NodeThresholds{
-			lowResourceThreshold:  map[v1.ResourceName]*resource.Quantity{},
-			highResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
+			lowResourceThreshold:  api.ReferencedResourceList{},
+			highResourceThreshold: api.ReferencedResourceList{},
 		}
 
 		for _, resourceName := range resourceNames {
@@ -206,7 +206,7 @@ func classifyNodes(
 	return lowNodes, highNodes
 }
 
-func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interface{} {
+func usageToKeysAndValues(usage api.ReferencedResourceList) []interface{} {
 	// log message in one line
 	keysAndValues := []interface{}{}
 	if quantity, exists := usage[v1.ResourceCPU]; exists {
@@ -241,7 +241,7 @@ func evictPodsFromSourceNodes(
 	usageClient usageClient,
 ) {
 	// upper bound on total number of pods/cpu/memory and optional extended resources to be moved
-	totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{}
+	totalAvailableUsage := api.ReferencedResourceList{}
 	for _, resourceName := range resourceNames {
 		totalAvailableUsage[resourceName] = &resource.Quantity{}
 	}
@@ -296,7 +296,7 @@ func evictPods(
 	evictableNamespaces *api.Namespaces,
 	inputPods []*v1.Pod,
 	nodeInfo NodeInfo,
-	totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
+	totalAvailableUsage api.ReferencedResourceList,
 	taintsOfLowNodes map[string][]v1.Taint,
 	podEvictor frameworktypes.Evictor,
 	evictOptions evictions.EvictOptions,
@@ -400,7 +400,7 @@ func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
 
 // isNodeAboveTargetUtilization checks if a node is overutilized
 // At least one resource has to be above the high threshold
-func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
+func isNodeAboveTargetUtilization(usage NodeUsage, threshold api.ReferencedResourceList) bool {
 	for name, nodeValue := range usage.usage {
 		// usage.highResourceThreshold[name] < nodeValue
 		if threshold[name].Cmp(*nodeValue) == -1 {
@@ -412,7 +412,7 @@ func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName
 
 // isNodeWithLowUtilization checks if a node is underutilized
 // All resources have to be below the low threshold
-func isNodeWithLowUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
+func isNodeWithLowUtilization(usage NodeUsage, threshold api.ReferencedResourceList) bool {
 	for name, nodeValue := range usage.usage {
 		// usage.lowResourceThreshold[name] < nodeValue
 		if threshold[name].Cmp(*nodeValue) == -1 {
diff --git a/pkg/framework/plugins/nodeutilization/nodeutilization_test.go b/pkg/framework/plugins/nodeutilization/nodeutilization_test.go
index 1703fd492..7bcab1960 100644
--- a/pkg/framework/plugins/nodeutilization/nodeutilization_test.go
+++ b/pkg/framework/plugins/nodeutilization/nodeutilization_test.go
@@ -23,6 +23,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/descheduler/pkg/api"
 )
 
 func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
@@ -71,7 +72,7 @@ func TestResourceUsagePercentages(t *testing.T) {
 				},
 			},
 		},
-		usage: map[v1.ResourceName]*resource.Quantity{
+		usage: api.ReferencedResourceList{
 			v1.ResourceCPU:    resource.NewMilliQuantity(1220, resource.DecimalSI),
 			v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 			v1.ResourcePods:   resource.NewQuantity(11, resource.BinarySI),
@@ -103,21 +104,21 @@ func TestSortNodesByUsage(t *testing.T) {
 			name: "cpu memory pods",
 			nodeInfoList: []NodeInfo{
 				*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceCPU:    resource.NewMilliQuantity(1730, resource.DecimalSI),
 						v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 						v1.ResourcePods:   resource.NewQuantity(25, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceCPU:    resource.NewMilliQuantity(1220, resource.DecimalSI),
 						v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 						v1.ResourcePods:   resource.NewQuantity(11, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceCPU:    resource.NewMilliQuantity(1530, resource.DecimalSI),
 						v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
 						v1.ResourcePods:   resource.NewQuantity(20, resource.BinarySI),
@@ -130,17 +131,17 @@ func TestSortNodesByUsage(t *testing.T) {
 			name: "memory",
 			nodeInfoList: []NodeInfo{
 				*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
 					}
 				}),
diff --git a/pkg/framework/plugins/nodeutilization/usageclients.go b/pkg/framework/plugins/nodeutilization/usageclients.go
index 01d1cdce4..44191b11c 100644
--- a/pkg/framework/plugins/nodeutilization/usageclients.go
+++ b/pkg/framework/plugins/nodeutilization/usageclients.go
@@ -26,6 +26,7 @@ import (
 	"k8s.io/klog/v2"
 	utilptr "k8s.io/utils/ptr"
 
+	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -37,9 +38,9 @@ type usageClient interface {
 	// after Balance method is invoked. There's no cache invalidation so each
 	// Balance is expected to get the latest data by invoking sync.
 	sync(nodes []*v1.Node) error
-	nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity
+	nodeUtilization(node string) api.ReferencedResourceList
 	pods(node string) []*v1.Pod
-	podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error)
+	podUsage(pod *v1.Pod) (api.ReferencedResourceList, error)
 }
 
 type requestedUsageClient struct {
@@ -47,7 +48,7 @@ type requestedUsageClient struct {
 	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
 
 	_pods            map[string][]*v1.Pod
-	_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
+	_nodeUtilization map[string]api.ReferencedResourceList
 }
 
 var _ usageClient = &requestedUsageClient{}
@@ -62,7 +63,7 @@ func newRequestedUsageClient(
 	}
 }
 
-func (s *requestedUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
+func (s *requestedUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
 	return s._nodeUtilization[node]
 }
 
@@ -70,8 +71,8 @@ func (s *requestedUsageClient) pods(node string) []*v1.Pod {
 	return s._pods[node]
 }
 
-func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
-	usage := make(map[v1.ResourceName]*resource.Quantity)
+func (s *requestedUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
+	usage := make(api.ReferencedResourceList)
 	for _, resourceName := range s.resourceNames {
 		usage[resourceName] = utilptr.To[resource.Quantity](utils.GetResourceRequestQuantity(pod, resourceName).DeepCopy())
 	}
@@ -79,7 +80,7 @@ func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resou
 }
 
 func (s *requestedUsageClient) sync(nodes []*v1.Node) error {
-	s._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
+	s._nodeUtilization = make(map[string]api.ReferencedResourceList)
 	s._pods = make(map[string][]*v1.Pod)
 
 	for _, node := range nodes {
@@ -111,7 +112,7 @@ type actualUsageClient struct {
 	metricsCollector *metricscollector.MetricsCollector
 
 	_pods            map[string][]*v1.Pod
-	_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
+	_nodeUtilization map[string]api.ReferencedResourceList
 }
 
 var _ usageClient = &actualUsageClient{}
@@ -128,7 +129,7 @@ func newActualUsageClient(
 	}
 }
 
-func (client *actualUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
+func (client *actualUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
 	return client._nodeUtilization[node]
 }
 
@@ -136,7 +137,7 @@ func (client *actualUsageClient) pods(node string) []*v1.Pod {
 	return client._pods[node]
 }
 
-func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
+func (client *actualUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
 	// It's not efficient to keep track of all pods in a cluster when only their fractions is evicted.
 	// Thus, take the current pod metrics without computing any softening (like e.g. EWMA).
 	podMetrics, err := client.metricsCollector.MetricsClient().MetricsV1beta1().PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -144,7 +145,7 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*res
 		return nil, fmt.Errorf("unable to get podmetrics for %q/%q: %v", pod.Namespace, pod.Name, err)
 	}
 
-	totalUsage := make(map[v1.ResourceName]*resource.Quantity)
+	totalUsage := make(api.ReferencedResourceList)
 	for _, container := range podMetrics.Containers {
 		for _, resourceName := range client.resourceNames {
 			if resourceName == v1.ResourcePods {
@@ -165,7 +166,7 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*res
 }
 
 func (client *actualUsageClient) sync(nodes []*v1.Node) error {
-	client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
+	client._nodeUtilization = make(map[string]api.ReferencedResourceList)
 	client._pods = make(map[string][]*v1.Pod)
 
 	nodesUsage, err := client.metricsCollector.AllNodesUsage()
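Editor's note (appended after the excerpt, which ends mid-hunk above): because the map values are pointers, an entry that was never filled in is a nil *resource.Quantity, so consumers such as the continueEvictionCond closures should check for presence before dereferencing. A hedged sketch of that kind of guard — hasAnyAvailable is a hypothetical helper, not code from the patch, and the map type is spelled out so the snippet compiles without the descheduler module:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// hasAnyAvailable reports whether any tracked resource still has capacity left.
// It mirrors the kind of check continueEvictionCond performs, but skips nil
// entries instead of dereferencing them.
func hasAnyAvailable(totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
	for _, q := range totalAvailableUsage {
		if q == nil {
			continue // entry was never filled in; don't dereference a nil *Quantity
		}
		if q.CmpInt64(0) > 0 {
			return true
		}
	}
	return false
}

func main() {
	usage := map[v1.ResourceName]*resource.Quantity{
		v1.ResourceCPU:  resource.NewMilliQuantity(0, resource.DecimalSI),
		v1.ResourcePods: nil, // e.g. a resource that was requested but never measured
	}
	fmt.Println(hasAnyAvailable(usage)) // false: CPU is exhausted and the Pods entry is nil
}
```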