mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 13:29:11 +01:00

ReferencedResourceList: alias for map[v1.ResourceName]*resource.Quantity to avoid the type definition duplication

Jan Chaloupka authored 2025-03-07 13:10:17 +01:00
parent fd9f2b4614
commit 50dd3b8971
9 changed files with 54 additions and 47 deletions
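For context, the alias the diff switches to is declared in the pkg/api package, presumably roughly as below; the exact file and doc comment are assumptions, only the alias form follows from the commit title:

package api

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// ReferencedResourceList mirrors v1.ResourceList but holds *resource.Quantity
// values, so entries can be mutated in place by the nodeutilization helpers.
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity

Because it is a true alias (=) rather than a new defined type, every call site below can switch to the shorter name without any conversion.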

View File

@@ -21,7 +21,6 @@ import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
@@ -138,7 +137,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
}
// stop if the total available usage has dropped to zero - no more pods can be scheduled
-continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
+continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
for name := range totalAvailableUsage {
if totalAvailableUsage[name].CmpInt64(0) < 1 {
return false

View File

@@ -21,7 +21,6 @@ import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
@@ -168,7 +167,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
}
// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
-continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
+continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
return false
}

View File

@@ -37,13 +37,13 @@ import (
// NodeUsage stores a node's info, pods on it, thresholds and its resource usage
type NodeUsage struct {
node *v1.Node
-usage map[v1.ResourceName]*resource.Quantity
+usage api.ReferencedResourceList
allPods []*v1.Pod
}
type NodeThresholds struct {
-lowResourceThreshold map[v1.ResourceName]*resource.Quantity
-highResourceThreshold map[v1.ResourceName]*resource.Quantity
+lowResourceThreshold api.ReferencedResourceList
+highResourceThreshold api.ReferencedResourceList
}
type NodeInfo struct {
@@ -51,7 +51,7 @@ type NodeInfo struct {
thresholds NodeThresholds
}
-type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool
+type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool
// NodePodsMap is a set of (node, pods) pairs
type NodePodsMap map[*v1.Node][]*v1.Pod
@@ -94,8 +94,8 @@ func getNodeThresholds(
}
nodeThresholdsMap[node.Name] = NodeThresholds{
-lowResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
-highResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
+lowResourceThreshold: api.ReferencedResourceList{},
+highResourceThreshold: api.ReferencedResourceList{},
}
for _, resourceName := range resourceNames {
@@ -206,7 +206,7 @@ func classifyNodes(
return lowNodes, highNodes
}
-func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interface{} {
+func usageToKeysAndValues(usage api.ReferencedResourceList) []interface{} {
// log message in one line
keysAndValues := []interface{}{}
if quantity, exists := usage[v1.ResourceCPU]; exists {
@@ -241,7 +241,7 @@ func evictPodsFromSourceNodes(
usageClient usageClient,
) {
// upper bound on total number of pods/cpu/memory and optional extended resources to be moved
-totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{}
+totalAvailableUsage := api.ReferencedResourceList{}
for _, resourceName := range resourceNames {
totalAvailableUsage[resourceName] = &resource.Quantity{}
}
@@ -296,7 +296,7 @@ func evictPods(
evictableNamespaces *api.Namespaces,
inputPods []*v1.Pod,
nodeInfo NodeInfo,
-totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
+totalAvailableUsage api.ReferencedResourceList,
taintsOfLowNodes map[string][]v1.Taint,
podEvictor frameworktypes.Evictor,
evictOptions evictions.EvictOptions,
@@ -400,7 +400,7 @@ func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
// isNodeAboveTargetUtilization checks if a node is overutilized
// At least one resource has to be above the high threshold
-func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
+func isNodeAboveTargetUtilization(usage NodeUsage, threshold api.ReferencedResourceList) bool {
for name, nodeValue := range usage.usage {
// usage.highResourceThreshold[name] < nodeValue
if threshold[name].Cmp(*nodeValue) == -1 {
@@ -412,7 +412,7 @@ func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName
// isNodeWithLowUtilization checks if a node is underutilized
// All resources have to be below the low threshold
-func isNodeWithLowUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
+func isNodeWithLowUtilization(usage NodeUsage, threshold api.ReferencedResourceList) bool {
for name, nodeValue := range usage.usage {
// usage.lowResourceThreshold[name] < nodeValue
if threshold[name].Cmp(*nodeValue) == -1 {
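The comparisons in these helpers (and in the continueEvictionCond checks earlier) rely on resource.Quantity's Cmp and CmpInt64, which return -1, 0, or 1 depending on whether the receiver is smaller than, equal to, or larger than the argument; so CmpInt64(0) < 1 reads "quantity <= 0" and threshold.Cmp(usage) == -1 reads "threshold < usage". A small standalone sketch, not part of this commit:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	remaining := resource.Quantity{} // zero quantity
	usage := resource.NewMilliQuantity(500, resource.DecimalSI)
	threshold := resource.NewMilliQuantity(400, resource.DecimalSI)

	// CmpInt64 compares a quantity against a plain int64.
	fmt.Println(remaining.CmpInt64(0) < 1) // true: nothing left, stop evicting
	fmt.Println(usage.CmpInt64(0) < 1)     // false: capacity still available

	// Cmp compares two quantities; -1 means the receiver is the smaller one.
	fmt.Println(threshold.Cmp(*usage) == -1) // true: usage is above the threshold
}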

View File

@@ -23,6 +23,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
)
func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
@@ -71,7 +72,7 @@ func TestResourceUsagePercentages(t *testing.T) {
},
},
},
-usage: map[v1.ResourceName]*resource.Quantity{
+usage: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
@@ -103,21 +104,21 @@ func TestSortNodesByUsage(t *testing.T) {
name: "cpu memory pods",
nodeInfoList: []NodeInfo{
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
-nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
-nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
-nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
@@ -130,17 +131,17 @@ func TestSortNodesByUsage(t *testing.T) {
name: "memory",
nodeInfoList: []NodeInfo{
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
-nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
-nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
-nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
}
}),

View File

@@ -26,6 +26,7 @@ import (
"k8s.io/klog/v2"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -37,9 +38,9 @@ type usageClient interface {
// after Balance method is invoked. There's no cache invalidation so each
// Balance is expected to get the latest data by invoking sync.
sync(nodes []*v1.Node) error
-nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity
+nodeUtilization(node string) api.ReferencedResourceList
pods(node string) []*v1.Pod
-podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error)
+podUsage(pod *v1.Pod) (api.ReferencedResourceList, error)
}
type requestedUsageClient struct {
@@ -47,7 +48,7 @@ type requestedUsageClient struct {
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
_pods map[string][]*v1.Pod
-_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
+_nodeUtilization map[string]api.ReferencedResourceList
}
var _ usageClient = &requestedUsageClient{}
@@ -62,7 +63,7 @@ func newRequestedUsageClient(
}
}
-func (s *requestedUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
+func (s *requestedUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
return s._nodeUtilization[node]
}
@@ -70,8 +71,8 @@ func (s *requestedUsageClient) pods(node string) []*v1.Pod {
return s._pods[node]
}
-func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
-usage := make(map[v1.ResourceName]*resource.Quantity)
+func (s *requestedUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
+usage := make(api.ReferencedResourceList)
for _, resourceName := range s.resourceNames {
usage[resourceName] = utilptr.To[resource.Quantity](utils.GetResourceRequestQuantity(pod, resourceName).DeepCopy())
}
@@ -79,7 +80,7 @@ func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resou
}
func (s *requestedUsageClient) sync(nodes []*v1.Node) error {
-s._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
+s._nodeUtilization = make(map[string]api.ReferencedResourceList)
s._pods = make(map[string][]*v1.Pod)
for _, node := range nodes {
@@ -111,7 +112,7 @@ type actualUsageClient struct {
metricsCollector *metricscollector.MetricsCollector
_pods map[string][]*v1.Pod
-_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
+_nodeUtilization map[string]api.ReferencedResourceList
}
var _ usageClient = &actualUsageClient{}
@@ -128,7 +129,7 @@ func newActualUsageClient(
}
}
-func (client *actualUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
+func (client *actualUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
return client._nodeUtilization[node]
}
@@ -136,7 +137,7 @@ func (client *actualUsageClient) pods(node string) []*v1.Pod {
return client._pods[node]
}
-func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
+func (client *actualUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
// It's not efficient to keep track of all pods in a cluster when only their fractions is evicted.
// Thus, take the current pod metrics without computing any softening (like e.g. EWMA).
podMetrics, err := client.metricsCollector.MetricsClient().MetricsV1beta1().PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -144,7 +145,7 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*res
return nil, fmt.Errorf("unable to get podmetrics for %q/%q: %v", pod.Namespace, pod.Name, err)
}
-totalUsage := make(map[v1.ResourceName]*resource.Quantity)
+totalUsage := make(api.ReferencedResourceList)
for _, container := range podMetrics.Containers {
for _, resourceName := range client.resourceNames {
if resourceName == v1.ResourcePods {
@@ -165,7 +166,7 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*res
}
func (client *actualUsageClient) sync(nodes []*v1.Node) error {
-client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
+client._nodeUtilization = make(map[string]api.ReferencedResourceList)
client._pods = make(map[string][]*v1.Pod)
nodesUsage, err := client.metricsCollector.AllNodesUsage()
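Since ReferencedResourceList is an alias, values constructed with the old spelled-out map type stay interchangeable wherever the new name is used, which is why every call site in this commit changes only in spelling. A standalone sketch; totalCPU is a made-up helper, not part of the descheduler code:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"sigs.k8s.io/descheduler/pkg/api"
)

// totalCPU accepts the alias introduced by this commit.
func totalCPU(usage api.ReferencedResourceList) *resource.Quantity {
	if q, ok := usage[v1.ResourceCPU]; ok {
		return q
	}
	return resource.NewQuantity(0, resource.DecimalSI)
}

func main() {
	// Built with the old explicit map type; identical to the alias, so it compiles unchanged.
	usage := map[v1.ResourceName]*resource.Quantity{
		v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
	}
	fmt.Println(totalCPU(usage).MilliValue()) // 1220
}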