mirror of https://github.com/kubernetes-sigs/descheduler.git
[nodeutilization]: prometheus usage client with prometheus metrics
@@ -95,7 +95,7 @@ func (h *HighNodeUtilization) Name() string {
 
 // Balance extension point implementation for the plugin
 func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
-	if err := h.usageClient.sync(nodes); err != nil {
+	if err := h.usageClient.sync(ctx, nodes); err != nil {
 		return &frameworktypes.Status{
 			Err: fmt.Errorf("error getting node usage: %v", err),
 		}
@@ -55,7 +55,23 @@ func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (f
 		return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
 	}
 
-	setDefaultForLNUThresholds(lowNodeUtilizationArgsArgs.Thresholds, lowNodeUtilizationArgsArgs.TargetThresholds, lowNodeUtilizationArgsArgs.UseDeviationThresholds)
+	metricsUtilization := lowNodeUtilizationArgsArgs.MetricsUtilization
+	if metricsUtilization != nil && metricsUtilization.Source == api.PrometheusMetrics {
+		if metricsUtilization.Prometheus != nil && metricsUtilization.Prometheus.Query != "" {
+			uResourceNames := getResourceNames(lowNodeUtilizationArgsArgs.Thresholds)
+			oResourceNames := getResourceNames(lowNodeUtilizationArgsArgs.TargetThresholds)
+			if len(uResourceNames) != 1 || uResourceNames[0] != MetricResource {
+				return nil, fmt.Errorf("thresholds are expected to specify a single instance of %q resource, got %v instead", MetricResource, uResourceNames)
+			}
+			if len(oResourceNames) != 1 || oResourceNames[0] != MetricResource {
+				return nil, fmt.Errorf("targetThresholds are expected to specify a single instance of %q resource, got %v instead", MetricResource, oResourceNames)
+			}
+		} else {
+			return nil, fmt.Errorf("prometheus query is missing")
+		}
+	} else {
+		setDefaultForLNUThresholds(lowNodeUtilizationArgsArgs.Thresholds, lowNodeUtilizationArgsArgs.TargetThresholds, lowNodeUtilizationArgsArgs.UseDeviationThresholds)
+	}
 
 	underutilizationCriteria := []interface{}{
 		"CPU", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceCPU],
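For orientation, a minimal sketch of plugin arguments that satisfy the Prometheus branch of this new check (values mirror the tests added further below; the recording rule name is only an example of a query yielding one sample per node):

	args := &LowNodeUtilizationArgs{
		// With a Prometheus source, both threshold maps must hold exactly one
		// entry, keyed by the dedicated MetricResource resource name.
		Thresholds:       api.ResourceThresholds{MetricResource: 30},
		TargetThresholds: api.ResourceThresholds{MetricResource: 50},
		MetricsUtilization: &MetricsUtilization{
			Source:     api.PrometheusMetrics,
			Prometheus: &Prometheus{Query: "instance:node_cpu:rate:sum"},
		},
	}

Any other resource name in either map, or a nil/empty query, is rejected before the plugin is constructed.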
@@ -90,11 +106,23 @@ func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (f
 
 	var usageClient usageClient
 	// MetricsServer is deprecated, removed once dropped
-	if lowNodeUtilizationArgsArgs.MetricsUtilization != nil && (lowNodeUtilizationArgsArgs.MetricsUtilization.MetricsServer || lowNodeUtilizationArgsArgs.MetricsUtilization.Source == api.KubernetesMetrics) {
-		if handle.MetricsCollector() == nil {
-			return nil, fmt.Errorf("metrics client not initialized")
-		}
-		usageClient = newActualUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc(), handle.MetricsCollector())
+	if metricsUtilization != nil {
+		switch {
+		case metricsUtilization.MetricsServer, metricsUtilization.Source == api.KubernetesMetrics:
+			if handle.MetricsCollector() == nil {
+				return nil, fmt.Errorf("metrics client not initialized")
+			}
+			usageClient = newActualUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc(), handle.MetricsCollector())
+		case metricsUtilization.Source == api.PrometheusMetrics:
+			if handle.PrometheusClient() == nil {
+				return nil, fmt.Errorf("prometheus client not initialized")
+			}
+			usageClient = newPrometheusUsageClient(handle.GetPodsAssignedToNodeFunc(), handle.PrometheusClient(), metricsUtilization.Prometheus.Query)
+		case metricsUtilization.Source != "":
+			return nil, fmt.Errorf("unrecognized metrics source")
+		default:
+			return nil, fmt.Errorf("metrics source is empty")
+		}
 	} else {
 		usageClient = newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc())
 	}
@@ -117,7 +145,7 @@ func (l *LowNodeUtilization) Name() string {
 
 // Balance extension point implementation for the plugin
 func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
-	if err := l.usageClient.sync(nodes); err != nil {
+	if err := l.usageClient.sync(ctx, nodes); err != nil {
 		return &frameworktypes.Status{
 			Err: fmt.Errorf("error getting node usage: %v", err),
 		}
@@ -126,12 +154,20 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 	nodesMap, nodesUsageMap, podListMap := getNodeUsageSnapshot(nodes, l.usageClient)
 	var nodeThresholdsMap map[string][]api.ResourceThresholds
 	if l.args.UseDeviationThresholds {
-		nodeThresholdsMap = getNodeThresholdsFromAverageNodeUsage(nodes, l.usageClient, l.args.Thresholds, l.args.TargetThresholds)
+		thresholds, average := getNodeThresholdsFromAverageNodeUsage(nodes, l.usageClient, l.args.Thresholds, l.args.TargetThresholds)
+		klog.InfoS("Average utilization through all nodes", "utilization", average)
+		// All nodes are expected to have the same thresholds
+		for nodeName := range thresholds {
+			klog.InfoS("Underutilization threshold based on average utilization", "threshold", thresholds[nodeName][0])
+			klog.InfoS("Overutilization threshold based on average utilization", "threshold", thresholds[nodeName][1])
+			break
+		}
+		nodeThresholdsMap = thresholds
 	} else {
 		nodeThresholdsMap = getStaticNodeThresholds(nodes, l.args.Thresholds, l.args.TargetThresholds)
 	}
 
 	nodesUsageAsNodeThresholdsMap := nodeUsageToResourceThresholds(nodesUsageMap, nodesMap)
 	nodeGroups := classifyNodeUsage(
 		nodesUsageAsNodeThresholdsMap,
 		nodeThresholdsMap,
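To read the two logged thresholds: each node's slice holds the underutilization bound at index 0 and the overutilization bound at index 1, and with UseDeviationThresholds both are derived from the cluster-wide average, which is why logging one node's pair is enough. As a worked example under that reading, taking the deviation test case added below (spans of 5 and 5, node usages of roughly 57%, 42%, and 20%): the average lands near 40%, so the bounds come out near 35% and 45%, classifying n3 as underutilized, n1 as overutilized, and n2 as appropriately utilized.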
@@ -41,6 +41,8 @@ import (
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/test"
+
+	"github.com/prometheus/common/model"
 )
 
 func TestLowNodeUtilization(t *testing.T) {
@@ -1257,9 +1259,9 @@ func TestLowNodeUtilization(t *testing.T) {
 			}
 			handle.MetricsCollectorImpl = collector
 
-			var metricsSource api.MetricsSource = ""
+			var metricsUtilization *MetricsUtilization
 			if metricsEnabled {
-				metricsSource = api.KubernetesMetrics
+				metricsUtilization = &MetricsUtilization{Source: api.KubernetesMetrics}
 			}
 
 			plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
@@ -1268,9 +1270,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				UseDeviationThresholds: tc.useDeviationThresholds,
 				EvictionLimits:         tc.evictionLimits,
 				EvictableNamespaces:    tc.evictableNamespaces,
-				MetricsUtilization: &MetricsUtilization{
-					Source: metricsSource,
-				},
+				MetricsUtilization: metricsUtilization,
 			},
 				handle)
 			if err != nil {
@@ -1444,3 +1444,241 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 		})
 	}
 }
+
+func withLocalStorage(pod *v1.Pod) {
+	// A pod with local storage.
+	test.SetNormalOwnerRef(pod)
+	pod.Spec.Volumes = []v1.Volume{
+		{
+			Name: "sample",
+			VolumeSource: v1.VolumeSource{
+				HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
+				EmptyDir: &v1.EmptyDirVolumeSource{
+					SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+				},
+			},
+		},
+	}
+	// A Mirror Pod.
+	pod.Annotations = test.GetMirrorPodAnnotation()
+}
+
+func withCriticalPod(pod *v1.Pod) {
+	// A Critical Pod.
+	test.SetNormalOwnerRef(pod)
+	pod.Namespace = "kube-system"
+	priority := utils.SystemCriticalPriority
+	pod.Spec.Priority = &priority
+}
+
+func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {
+	n1NodeName := "n1"
+	n2NodeName := "n2"
+	n3NodeName := "n3"
+
+	testCases := []struct {
+		name                string
+		samples             model.Vector
+		nodes               []*v1.Node
+		pods                []*v1.Pod
+		expectedPodsEvicted uint
+		evictedPods         []string
+		args                *LowNodeUtilizationArgs
+	}{
+		{
+			name: "with instance:node_cpu:rate:sum query",
+			args: &LowNodeUtilizationArgs{
+				Thresholds: api.ResourceThresholds{
+					MetricResource: 30,
+				},
+				TargetThresholds: api.ResourceThresholds{
+					MetricResource: 50,
+				},
+				MetricsUtilization: &MetricsUtilization{
+					Source: api.PrometheusMetrics,
+					Prometheus: &Prometheus{
+						Query: "instance:node_cpu:rate:sum",
+					},
+				},
+			},
+			samples: model.Vector{
+				sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
+				sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
+				sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
+				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
+				test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
+			},
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				// These won't be evicted.
+				test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
+				test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
+				test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
+				test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
+			},
+			expectedPodsEvicted: 1,
+		},
+		{
+			name: "with instance:node_cpu:rate:sum query with more evictions",
+			args: &LowNodeUtilizationArgs{
+				Thresholds: api.ResourceThresholds{
+					MetricResource: 30,
+				},
+				TargetThresholds: api.ResourceThresholds{
+					MetricResource: 50,
+				},
+				EvictionLimits: &api.EvictionLimits{
+					Node: ptr.To[uint](3),
+				},
+				MetricsUtilization: &MetricsUtilization{
+					Source: api.PrometheusMetrics,
+					Prometheus: &Prometheus{
+						Query: "instance:node_cpu:rate:sum",
+					},
+				},
+			},
+			samples: model.Vector{
+				sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
+				sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
+				sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
+				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
+				test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
+			},
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				// These won't be evicted.
+				test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
+				test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
+				test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
+				test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
+			},
+			expectedPodsEvicted: 3,
+		},
+		{
+			name: "with instance:node_cpu:rate:sum query with deviation",
+			args: &LowNodeUtilizationArgs{
+				Thresholds: api.ResourceThresholds{
+					MetricResource: 5,
+				},
+				TargetThresholds: api.ResourceThresholds{
+					MetricResource: 5,
+				},
+				EvictionLimits: &api.EvictionLimits{
+					Node: ptr.To[uint](2),
+				},
+				UseDeviationThresholds: true,
+				MetricsUtilization: &MetricsUtilization{
+					Source: api.PrometheusMetrics,
+					Prometheus: &Prometheus{
+						Query: "instance:node_cpu:rate:sum",
+					},
+				},
+			},
+			samples: model.Vector{
+				sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
+				sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
+				sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
+			},
+			nodes: []*v1.Node{
+				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
+				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
+				test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
+			},
+			pods: []*v1.Pod{
+				test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
+				// These won't be evicted.
+				test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
+				test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
+				test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
+				test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
+			},
+			expectedPodsEvicted: 2,
+		},
+	}
+
+	for _, tc := range testCases {
+		testFnc := func(metricsEnabled bool, expectedPodsEvicted uint) func(t *testing.T) {
+			return func(t *testing.T) {
+				ctx, cancel := context.WithCancel(context.Background())
+				defer cancel()
+
+				var objs []runtime.Object
+				for _, node := range tc.nodes {
+					objs = append(objs, node)
+				}
+				for _, pod := range tc.pods {
+					objs = append(objs, pod)
+				}
+
+				fakeClient := fake.NewSimpleClientset(objs...)
+
+				podsForEviction := make(map[string]struct{})
+				for _, pod := range tc.evictedPods {
+					podsForEviction[pod] = struct{}{}
+				}
+
+				evictionFailed := false
+				if len(tc.evictedPods) > 0 {
+					fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
+						getAction := action.(core.CreateAction)
+						obj := getAction.GetObject()
+						if eviction, ok := obj.(*policy.Eviction); ok {
+							if _, exists := podsForEviction[eviction.Name]; exists {
+								return true, obj, nil
+							}
+							evictionFailed = true
+							return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
+						}
+						return true, obj, nil
+					})
+				}
+
+				handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
+				if err != nil {
+					t.Fatalf("Unable to initialize a framework handle: %v", err)
+				}
+
+				handle.PrometheusClientImpl = &fakePromClient{
+					result:   tc.samples,
+					dataType: model.ValVector,
+				}
+				plugin, err := NewLowNodeUtilization(tc.args, handle)
+				if err != nil {
+					t.Fatalf("Unable to initialize the plugin: %v", err)
+				}
+
+				status := plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)
+				if status != nil {
+					t.Fatalf("Balance.err: %v", status.Err)
+				}
+
+				podsEvicted := podEvictor.TotalEvicted()
+				if expectedPodsEvicted != podsEvicted {
+					t.Errorf("Expected %v pods to be evicted but %v got evicted", expectedPodsEvicted, podsEvicted)
+				}
+				if evictionFailed {
+					t.Errorf("Pod evictions failed unexpectedly")
+				}
+			}
+		}
+		t.Run(tc.name, testFnc(false, tc.expectedPodsEvicted))
+	}
+}
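A note on the expected counts above (a reading of the cases, not part of the commit): with thresholds of 30 and 50, the sampled rates put n1 (~57%) over the target, n3 (~20%) under the threshold, and n2 (~42%) in between, so only n1 sheds pods. Because the Prometheus usage client cannot attribute usage to individual pods, eviction runs in the unconstrained mode introduced in evictPods below: without a per-node eviction limit it stops after a single pod (first case), while EvictionLimits.Node of 3 or 2 permits exactly that many (second and third cases).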
@@ -57,6 +57,7 @@ import (
 // - thresholds: map[string][]api.ReferencedResourceList
 // - pod list: map[string][]*v1.Pod
 // Once the nodes are classified produce the original []NodeInfo so the code is not that much changed (postponing further refactoring once it is needed)
+const MetricResource = v1.ResourceName("MetricResource")
 
 // NodeUsage stores a node's info, pods on it, thresholds and its resource usage
 type NodeUsage struct {
@@ -94,20 +95,30 @@ func normalizePercentage(percent api.Percentage) api.Percentage {
 	return percent
 }
 
+func nodeCapacity(node *v1.Node, nodeUsage api.ReferencedResourceList) v1.ResourceList {
+	capacity := node.Status.Capacity
+	if len(node.Status.Allocatable) > 0 {
+		capacity = node.Status.Allocatable
+	}
+	// the usage captures the metrics resource
+	if _, ok := nodeUsage[MetricResource]; ok {
+		// Make ResourceMetrics 100% => 100 points
+		capacity[MetricResource] = *resource.NewQuantity(int64(100), resource.DecimalSI)
+	}
+	return capacity
+}
+
 func getNodeThresholdsFromAverageNodeUsage(
 	nodes []*v1.Node,
 	usageClient usageClient,
 	lowSpan, highSpan api.ResourceThresholds,
-) map[string][]api.ResourceThresholds {
+) (map[string][]api.ResourceThresholds, api.ResourceThresholds) {
 	total := api.ResourceThresholds{}
 	average := api.ResourceThresholds{}
 	numberOfNodes := len(nodes)
 	for _, node := range nodes {
 		usage := usageClient.nodeUtilization(node.Name)
-		nodeCapacity := node.Status.Capacity
-		if len(node.Status.Allocatable) > 0 {
-			nodeCapacity = node.Status.Allocatable
-		}
+		nodeCapacity := nodeCapacity(node, usage)
 		for resource, value := range usage {
 			nodeCapacityValue := nodeCapacity[resource]
 			if resource == v1.ResourceCPU {
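A small sketch of the scale this helper pins down (sample value taken from the tests; the conversion matches NodeUsageFromPrometheusMetrics further below): a Prometheus sample must lie in <0; 1> and is stored as int64(value*100), while nodeCapacity fixes the MetricResource capacity at 100, so the usage-to-capacity ratio is the utilization percentage directly:

	// 0.4245... -> 42 usage points against a capacity of 100 -> 42% utilized
	usage := resource.NewQuantity(int64(0.4245454545454522*100), resource.DecimalSI)
	capacity := resource.NewQuantity(int64(100), resource.DecimalSI)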
@@ -138,7 +149,7 @@ func getNodeThresholdsFromAverageNodeUsage(
 			highThreshold,
 		}
 	}
-	return nodeThresholds
+	return nodeThresholds, average
 }
 
 func getStaticNodeThresholds(
@@ -216,10 +227,7 @@ func roundTo2Decimals(percentage float64) float64 {
 }
 
 func resourceUsagePercentages(nodeUsage api.ReferencedResourceList, node *v1.Node, round bool) api.ResourceThresholds {
-	nodeCapacity := node.Status.Capacity
-	if len(node.Status.Allocatable) > 0 {
-		nodeCapacity = node.Status.Allocatable
-	}
+	nodeCapacity := nodeCapacity(node, nodeUsage)
 
 	resourceUsagePercentage := api.ResourceThresholds{}
 	for resourceName, resourceUsage := range nodeUsage {
@@ -395,16 +403,29 @@ func evictPods(
 			if !preEvictionFilterWithOptions(pod) {
 				continue
 			}
 
+			// In case podUsage does not support resource counting (e.g. provided metric
+			// does not quantify pod resource utilization).
+			unconstrainedResourceEviction := false
 			podUsage, err := usageClient.podUsage(pod)
 			if err != nil {
-				klog.Errorf("unable to get pod usage for %v/%v: %v", pod.Namespace, pod.Name, err)
-				continue
+				if _, ok := err.(*notSupportedError); !ok {
+					klog.Errorf("unable to get pod usage for %v/%v: %v", pod.Namespace, pod.Name, err)
+					continue
+				}
+				unconstrainedResourceEviction = true
 			}
 			err = podEvictor.Evict(ctx, pod, evictOptions)
 			if err == nil {
+				if maxNoOfPodsToEvictPerNode == nil && unconstrainedResourceEviction {
+					klog.V(3).InfoS("Currently, only a single pod eviction is allowed")
+					break
+				}
 				evictionCounter++
 				klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
+				if unconstrainedResourceEviction {
+					continue
+				}
 				for name := range totalAvailableUsage {
 					if name == v1.ResourcePods {
 						nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
@@ -55,13 +55,24 @@ type HighNodeUtilizationArgs struct {
 }
 
 // MetricsUtilization allow to consume actual resource utilization from metrics
+// +k8s:deepcopy-gen=true
 type MetricsUtilization struct {
 	// metricsServer enables metrics from a kubernetes metrics server.
 	// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
-	// Deprecated. Use MetricsSource instead.
+	// Deprecated. Use Source instead.
 	MetricsServer bool `json:"metricsServer,omitempty"`
 
 	// source enables the plugin to consume metrics from a metrics source.
 	// Currently only KubernetesMetrics available.
 	Source api.MetricsSource `json:"source,omitempty"`
+
+	// prometheus enables metrics collection through a prometheus query.
+	Prometheus *Prometheus `json:"prometheus,omitempty"`
 }
+
+type Prometheus struct {
+	// query returning a vector of samples, each sample labeled with `instance`
+	// corresponding to a node name with each sample value as a real number
+	// in <0; 1> interval.
+	Query string `json:"query,omitempty"`
+}
@@ -19,7 +19,11 @@ package nodeutilization
 import (
 	"context"
 	"fmt"
+	"time"
 
+	promapi "github.com/prometheus/client_golang/api"
+	promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
+	"github.com/prometheus/common/model"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -33,11 +37,33 @@ import (
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
+type UsageClientType int
+
+const (
+	requestedUsageClientType UsageClientType = iota
+	actualUsageClientType
+	prometheusUsageClientType
+)
+
+type notSupportedError struct {
+	usageClientType UsageClientType
+}
+
+func (e notSupportedError) Error() string {
+	return "pod usage is not supported by the usage client"
+}
+
+func newNotSupportedError(usageClientType UsageClientType) *notSupportedError {
+	return &notSupportedError{
+		usageClientType: usageClientType,
+	}
+}
+
 type usageClient interface {
 	// Both low/high node utilization plugins are expected to invoke sync right
 	// after Balance method is invoked. There's no cache invalidation so each
 	// Balance is expected to get the latest data by invoking sync.
-	sync(nodes []*v1.Node) error
+	sync(ctx context.Context, nodes []*v1.Node) error
 	nodeUtilization(node string) api.ReferencedResourceList
 	pods(node string) []*v1.Pod
 	podUsage(pod *v1.Pod) (api.ReferencedResourceList, error)
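Callers that can tolerate a usage client without per-pod accounting are expected to type-assert the sentinel error; a condensed sketch of the pattern used in the evictPods hunk above:

	podUsage, err := usageClient.podUsage(pod)
	if err != nil {
		if _, ok := err.(*notSupportedError); !ok {
			return err // a genuine failure
		}
		// pod-level usage unavailable (e.g. the prometheus client):
		// fall back to unconstrained eviction
	}
	_ = podUsage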
@@ -79,7 +105,7 @@ func (s *requestedUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList
 	return usage, nil
 }
 
-func (s *requestedUsageClient) sync(nodes []*v1.Node) error {
+func (s *requestedUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
 	s._nodeUtilization = make(map[string]api.ReferencedResourceList)
 	s._pods = make(map[string][]*v1.Pod)
@@ -165,7 +191,7 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceLi
 	return totalUsage, nil
 }
 
-func (client *actualUsageClient) sync(nodes []*v1.Node) error {
+func (client *actualUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
 	client._nodeUtilization = make(map[string]api.ReferencedResourceList)
 	client._pods = make(map[string][]*v1.Pod)
@@ -200,3 +226,95 @@ func (client *actualUsageClient) sync(nodes []*v1.Node) error {
 
 	return nil
 }
+
+type prometheusUsageClient struct {
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
+	promClient            promapi.Client
+	promQuery             string
+
+	_pods            map[string][]*v1.Pod
+	_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
+}
+
+var _ usageClient = &prometheusUsageClient{}
+
+func newPrometheusUsageClient(
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
+	promClient promapi.Client,
+	promQuery string,
+) *prometheusUsageClient {
+	return &prometheusUsageClient{
+		getPodsAssignedToNode: getPodsAssignedToNode,
+		promClient:            promClient,
+		promQuery:             promQuery,
+	}
+}
+
+func (client *prometheusUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
+	return client._nodeUtilization[node]
+}
+
+func (client *prometheusUsageClient) pods(node string) []*v1.Pod {
+	return client._pods[node]
+}
+
+func (client *prometheusUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
+	return nil, newNotSupportedError(prometheusUsageClientType)
+}
+
+func NodeUsageFromPrometheusMetrics(ctx context.Context, promClient promapi.Client, promQuery string) (map[string]map[v1.ResourceName]*resource.Quantity, error) {
+	results, warnings, err := promv1.NewAPI(promClient).Query(ctx, promQuery, time.Now())
+	if err != nil {
+		return nil, fmt.Errorf("unable to capture prometheus metrics: %v", err)
+	}
+	if len(warnings) > 0 {
+		klog.Infof("prometheus metrics warnings: %v", warnings)
+	}
+
+	if results.Type() != model.ValVector {
+		return nil, fmt.Errorf("expected query results to be of type %q, got %q instead", model.ValVector, results.Type())
+	}
+
+	nodeUsages := make(map[string]map[v1.ResourceName]*resource.Quantity)
+	for _, sample := range results.(model.Vector) {
+		nodeName, exists := sample.Metric["instance"]
+		if !exists {
+			return nil, fmt.Errorf("The collected metrics sample is missing 'instance' key")
+		}
+		if sample.Value < 0 || sample.Value > 1 {
+			return nil, fmt.Errorf("The collected metrics sample for %q has value %v outside of <0; 1> interval", string(nodeName), sample.Value)
+		}
+		nodeUsages[string(nodeName)] = map[v1.ResourceName]*resource.Quantity{
+			MetricResource: resource.NewQuantity(int64(sample.Value*100), resource.DecimalSI),
+		}
+	}
+
+	return nodeUsages, nil
+}
+
+func (client *prometheusUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
+	client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
+	client._pods = make(map[string][]*v1.Pod)
+
+	nodeUsages, err := NodeUsageFromPrometheusMetrics(ctx, client.promClient, client.promQuery)
+	if err != nil {
+		return err
+	}
+
+	for _, node := range nodes {
+		if _, exists := nodeUsages[node.Name]; !exists {
+			return fmt.Errorf("unable to find metric entry for %v", node.Name)
+		}
+		pods, err := podutil.ListPodsOnANode(node.Name, client.getPodsAssignedToNode, nil)
+		if err != nil {
+			klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
+			return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
+		}
+
+		// store the snapshot of pods from the same (or the closest) node utilization computation
+		client._pods[node.Name] = pods
+		client._nodeUtilization[node.Name] = nodeUsages[node.Name]
+	}
+
+	return nil
+}
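Since NodeUsageFromPrometheusMetrics is exported, it can also be exercised outside the plugin; a minimal sketch assuming a reachable Prometheus endpoint (the address below is a placeholder, and the recording rule is just the one used in the tests):

	promClient, err := promapi.NewClient(promapi.Config{Address: "http://prometheus.example:9090"})
	if err != nil {
		klog.Fatalf("building prometheus client: %v", err)
	}
	usages, err := NodeUsageFromPrometheusMetrics(context.TODO(), promClient, "instance:node_cpu:rate:sum")
	if err != nil {
		klog.Fatalf("query failed, returned a non-vector, or a sample was malformed: %v", err)
	}
	for node, usage := range usages {
		// each node maps to a single MetricResource quantity on a 0-100 scale
		klog.Infof("%s: %v points", node, usage[MetricResource].Value())
	}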
@@ -18,9 +18,14 @@ package nodeutilization
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
+	"net/http"
+	"net/url"
 	"testing"
 
+	"github.com/prometheus/common/model"
+
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/labels"
@@ -58,9 +63,9 @@ func updateMetricsAndCheckNodeUtilization(
 	if err != nil {
 		t.Fatalf("failed to capture metrics: %v", err)
 	}
-	err = usageClient.sync(nodes)
+	err = usageClient.sync(ctx, nodes)
 	if err != nil {
-		t.Fatalf("failed to capture a snapshot: %v", err)
+		t.Fatalf("failed to sync a snapshot: %v", err)
 	}
 	nodeUtilization := usageClient.nodeUtilization(nodeName)
 	t.Logf("current node cpu usage: %v\n", nodeUtilization[v1.ResourceCPU].MilliValue())
@@ -137,3 +142,158 @@ func TestActualUsageClient(t *testing.T) {
 		metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
 	)
 }
+
+type fakePromClient struct {
+	result   interface{}
+	dataType model.ValueType
+}
+
+type fakePayload struct {
+	Status string      `json:"status"`
+	Data   queryResult `json:"data"`
+}
+
+type queryResult struct {
+	Type   model.ValueType `json:"resultType"`
+	Result interface{}     `json:"result"`
+}
+
+func (client *fakePromClient) URL(ep string, args map[string]string) *url.URL {
+	return &url.URL{}
+}
+
+func (client *fakePromClient) Do(ctx context.Context, request *http.Request) (*http.Response, []byte, error) {
+	jsonData, err := json.Marshal(fakePayload{
+		Status: "success",
+		Data: queryResult{
+			Type:   client.dataType,
+			Result: client.result,
+		},
+	})
+
+	return &http.Response{StatusCode: 200}, jsonData, err
+}
+
+func sample(metricName, nodeName string, value float64) *model.Sample {
+	return &model.Sample{
+		Metric: model.Metric{
+			"__name__": model.LabelValue(metricName),
+			"instance": model.LabelValue(nodeName),
+		},
+		Value:     model.SampleValue(value),
+		Timestamp: 1728991761711,
+	}
+}
+
+func TestPrometheusUsageClient(t *testing.T) {
+	n1 := test.BuildTestNode("ip-10-0-17-165.ec2.internal", 2000, 3000, 10, nil)
+	n2 := test.BuildTestNode("ip-10-0-51-101.ec2.internal", 2000, 3000, 10, nil)
+	n3 := test.BuildTestNode("ip-10-0-94-25.ec2.internal", 2000, 3000, 10, nil)
+
+	nodes := []*v1.Node{n1, n2, n3}
+
+	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
+	p21 := test.BuildTestPod("p21", 400, 0, n2.Name, nil)
+	p22 := test.BuildTestPod("p22", 400, 0, n2.Name, nil)
+	p3 := test.BuildTestPod("p3", 400, 0, n3.Name, nil)
+
+	tests := []struct {
+		name      string
+		result    interface{}
+		dataType  model.ValueType
+		nodeUsage map[string]int64
+		err       error
+	}{
+		{
+			name:     "valid data",
+			dataType: model.ValVector,
+			result: model.Vector{
+				sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 0.20381818181818104),
+				sample("instance:node_cpu:rate:sum", "ip-10-0-17-165.ec2.internal", 0.4245454545454522),
+				sample("instance:node_cpu:rate:sum", "ip-10-0-94-25.ec2.internal", 0.5695757575757561),
+			},
+			nodeUsage: map[string]int64{
+				"ip-10-0-51-101.ec2.internal": 20,
+				"ip-10-0-17-165.ec2.internal": 42,
+				"ip-10-0-94-25.ec2.internal":  56,
+			},
+		},
+		{
+			name:     "invalid data missing instance label",
+			dataType: model.ValVector,
+			result: model.Vector{
+				&model.Sample{
+					Metric: model.Metric{
+						"__name__": model.LabelValue("instance:node_cpu:rate:sum"),
+					},
+					Value:     model.SampleValue(0.20381818181818104),
+					Timestamp: 1728991761711,
+				},
+			},
+			err: fmt.Errorf("The collected metrics sample is missing 'instance' key"),
+		},
+		{
+			name:     "invalid data value out of range",
+			dataType: model.ValVector,
+			result: model.Vector{
+				sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 1.20381818181818104),
+			},
+			err: fmt.Errorf("The collected metrics sample for \"ip-10-0-51-101.ec2.internal\" has value 1.203818181818181 outside of <0; 1> interval"),
+		},
+		{
+			name:     "invalid data not a vector",
+			dataType: model.ValScalar,
+			result: model.Scalar{
+				Value:     model.SampleValue(0.20381818181818104),
+				Timestamp: 1728991761711,
+			},
+			err: fmt.Errorf("expected query results to be of type \"vector\", got \"scalar\" instead"),
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			pClient := &fakePromClient{
+				result:   tc.result,
+				dataType: tc.dataType,
+			}
+
+			clientset := fakeclientset.NewSimpleClientset(n1, n2, n3, p1, p21, p22, p3)
+
+			ctx := context.TODO()
+			sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
+			podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Fatalf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+			prometheusUsageClient := newPrometheusUsageClient(podsAssignedToNode, pClient, "instance:node_cpu:rate:sum")
+			err = prometheusUsageClient.sync(ctx, nodes)
+			if tc.err == nil {
+				if err != nil {
+					t.Fatalf("unexpected error: %v", err)
+				}
+			} else {
+				if err == nil {
+					t.Fatalf("unexpected %q error, got nil instead", tc.err)
+				} else if err.Error() != tc.err.Error() {
+					t.Fatalf("expected %q error, got %q instead", tc.err, err)
+				}
+				return
+			}
+
+			for _, node := range nodes {
+				nodeUtil := prometheusUsageClient.nodeUtilization(node.Name)
+				if nodeUtil[MetricResource].Value() != tc.nodeUsage[node.Name] {
+					t.Fatalf("expected %q node utilization to be %v, got %v instead", node.Name, tc.nodeUsage[node.Name], nodeUtil[MetricResource])
+				} else {
+					t.Logf("%v node utilization: %v", node.Name, nodeUtil[MetricResource])
+				}
+			}
+		})
+	}
+}
@@ -44,6 +44,17 @@ func ValidateLowNodeUtilizationArgs(obj runtime.Object) error {
 	if err != nil {
 		return err
 	}
+	if args.MetricsUtilization != nil {
+		if args.MetricsUtilization.Source == api.KubernetesMetrics && args.MetricsUtilization.MetricsServer {
+			return fmt.Errorf("it is not allowed to set both %q source and metricsServer", api.KubernetesMetrics)
+		}
+		if args.MetricsUtilization.Source == api.KubernetesMetrics && args.MetricsUtilization.Prometheus != nil {
+			return fmt.Errorf("prometheus configuration is not allowed to set when source is set to %q", api.KubernetesMetrics)
+		}
+		if args.MetricsUtilization.Source == api.PrometheusMetrics && (args.MetricsUtilization.Prometheus == nil || args.MetricsUtilization.Prometheus.Query == "") {
+			return fmt.Errorf("prometheus query is required when metrics source is set to %q", api.PrometheusMetrics)
+		}
+	}
 	return nil
 }
 
@@ -183,6 +183,65 @@ func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
 			},
 			errInfo: nil,
 		},
+		{
+			name: "setting both kubernetes metrics source and metricsserver",
+			args: &LowNodeUtilizationArgs{
+				Thresholds: api.ResourceThresholds{
+					v1.ResourceCPU:    20,
+					v1.ResourceMemory: 20,
+					extendedResource:  20,
+				},
+				TargetThresholds: api.ResourceThresholds{
+					v1.ResourceCPU:    80,
+					v1.ResourceMemory: 80,
+					extendedResource:  80,
+				},
+				MetricsUtilization: &MetricsUtilization{
+					MetricsServer: true,
+					Source:        api.KubernetesMetrics,
+				},
+			},
+			errInfo: fmt.Errorf("it is not allowed to set both \"KubernetesMetrics\" source and metricsServer"),
+		},
+		{
+			name: "missing prometheus query",
+			args: &LowNodeUtilizationArgs{
+				Thresholds: api.ResourceThresholds{
+					v1.ResourceCPU:    20,
+					v1.ResourceMemory: 20,
+					extendedResource:  20,
+				},
+				TargetThresholds: api.ResourceThresholds{
+					v1.ResourceCPU:    80,
+					v1.ResourceMemory: 80,
+					extendedResource:  80,
+				},
+				MetricsUtilization: &MetricsUtilization{
+					Source: api.PrometheusMetrics,
+				},
+			},
+			errInfo: fmt.Errorf("prometheus query is required when metrics source is set to \"Prometheus\""),
+		},
+		{
+			name: "prometheus set when source set to kubernetes metrics",
+			args: &LowNodeUtilizationArgs{
+				Thresholds: api.ResourceThresholds{
+					v1.ResourceCPU:    20,
+					v1.ResourceMemory: 20,
+					extendedResource:  20,
+				},
+				TargetThresholds: api.ResourceThresholds{
+					v1.ResourceCPU:    80,
+					v1.ResourceMemory: 80,
+					extendedResource:  80,
+				},
+				MetricsUtilization: &MetricsUtilization{
+					Source:     api.KubernetesMetrics,
+					Prometheus: &Prometheus{},
+				},
+			},
+			errInfo: fmt.Errorf("prometheus configuration is not allowed to set when source is set to \"KubernetesMetrics\""),
+		},
 	}
 
 	for _, testCase := range tests {
@@ -190,10 +249,10 @@ func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
 			validateErr := ValidateLowNodeUtilizationArgs(runtime.Object(testCase.args))
 			if validateErr == nil || testCase.errInfo == nil {
 				if validateErr != testCase.errInfo {
-					t.Errorf("expected validity of plugin config: %v but got %v instead", testCase.errInfo, validateErr)
+					t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
 				}
 			} else if validateErr.Error() != testCase.errInfo.Error() {
-				t.Errorf("expected validity of plugin config: %v but got %v instead", testCase.errInfo, validateErr)
+				t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
 			}
 		})
 	}
@@ -84,7 +84,7 @@ func (in *LowNodeUtilizationArgs) DeepCopyInto(out *LowNodeUtilizationArgs) {
 	if in.MetricsUtilization != nil {
 		in, out := &in.MetricsUtilization, &out.MetricsUtilization
 		*out = new(MetricsUtilization)
-		**out = **in
+		(*in).DeepCopyInto(*out)
 	}
 	if in.EvictableNamespaces != nil {
 		in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
@@ -116,3 +116,24 @@ func (in *LowNodeUtilizationArgs) DeepCopyObject() runtime.Object {
 	}
 	return nil
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricsUtilization) DeepCopyInto(out *MetricsUtilization) {
+	*out = *in
+	if in.Prometheus != nil {
+		in, out := &in.Prometheus, &out.Prometheus
+		*out = new(Prometheus)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsUtilization.
+func (in *MetricsUtilization) DeepCopy() *MetricsUtilization {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricsUtilization)
+	in.DeepCopyInto(out)
+	return out
+}