Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 21:31:18 +01:00
feat: refactor thresholds and usage assessment
This commit refactors the thresholds and usage assessment for the node utilization plugins. Both the HighNodeUtilization and LowNodeUtilization plugins are affected by this change.
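For context, below is a minimal runnable sketch of the deviation-threshold semantics the new test cases exercise, assuming the documented behavior: with useDeviationThresholds enabled, thresholds and targetThresholds are treated as offsets below and above the mean utilization across nodes, rather than as absolute percentages. The node names and usage figures are hypothetical, chosen to mirror the 30% CPU average in the first new test case; this is not the plugin's actual code.

	// deviation_sketch.go: classify nodes against deviation thresholds.
	package main

	import "fmt"

	func main() {
		// Hypothetical per-node CPU utilization, in percent.
		nodes := []struct {
			name  string
			usage float64
		}{
			{"n1", 80}, {"n2", 10}, {"n3", 0},
		}

		// Deviation thresholds, as in the test case below:
		// 5 points below and 5 points above the mean.
		const lowDeviation, highDeviation = 5.0, 5.0

		// Mean utilization across all nodes.
		var sum float64
		for _, n := range nodes {
			sum += n.usage
		}
		mean := sum / float64(len(nodes))

		low := mean - lowDeviation   // below this: underutilized
		high := mean + highDeviation // above this: overutilized

		for _, n := range nodes {
			switch {
			case n.usage < low:
				fmt.Printf("%s: underutilized (%.0f%% < %.0f%%)\n", n.name, n.usage, low)
			case n.usage > high:
				fmt.Printf("%s: overutilized (%.0f%% > %.0f%%)\n", n.name, n.usage, high)
			default:
				fmt.Printf("%s: appropriately utilized (%.0f%%)\n", n.name, n.usage)
			}
		}
	}

With these numbers the mean is 30%, so the bounds are 25% and 35%: n1 is overutilized and n2/n3 are underutilized, which is the CPU-axis situation the "overevicting memory" case sets up.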
@@ -1022,6 +1022,79 @@ func TestLowNodeUtilization(t *testing.T) {
			expectedPodsWithMetricsEvicted: 2,
			evictedPods:                    []string{},
		},
		{
			name: "deviation thresholds and overevicting memory",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  5,
				v1.ResourcePods: 5,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  5,
				v1.ResourcePods: 5,
			},
			useDeviationThresholds: true,
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
				test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
			},
			// totalcpuusage = 3600m, avgcpuusage = 3600/12000 = 0.3 => 30%
			// totalpodsusage = 9, avgpodsusage = 9/30 = 0.3 => 30%
			// n1 and n2 are fully memory utilized
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 400, 375, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p2", 400, 375, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p3", 400, 375, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p4", 400, 375, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p5", 400, 375, n1NodeName, test.SetRSOwnerRef),
				// These won't be evicted.
				test.BuildTestPod("p6", 400, 375, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p7", 400, 375, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
								},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p8", 400, 375, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					test.SetNormalOwnerRef(pod)
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
				test.BuildTestPod("p9", 400, 3000, n2NodeName, test.SetRSOwnerRef),
			},
			nodemetricses: []*v1beta1.NodeMetrics{
				test.BuildNodeMetrics(n1NodeName, 4000, 3000),
				test.BuildNodeMetrics(n2NodeName, 4000, 3000),
				test.BuildNodeMetrics(n3NodeName, 4000, 3000),
			},
			podmetricses: []*v1beta1.PodMetrics{
				test.BuildPodMetrics("p1", 400, 375),
				test.BuildPodMetrics("p2", 400, 375),
				test.BuildPodMetrics("p3", 400, 375),
				test.BuildPodMetrics("p4", 400, 375),
				test.BuildPodMetrics("p5", 400, 375),
				test.BuildPodMetrics("p6", 400, 375),
				test.BuildPodMetrics("p7", 400, 375),
				test.BuildPodMetrics("p8", 400, 375),
				test.BuildPodMetrics("p9", 400, 3000),
			},
			expectedPodsEvicted:            0,
			expectedPodsWithMetricsEvicted: 0,
			evictedPods:                    []string{},
		},
		{
			name: "without priorities different evictions for requested and actual resources",
			thresholds: api.ResourceThresholds{
@@ -1612,6 +1685,43 @@ func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {
			},
			expectedPodsEvicted: 2,
		},
		{
			name: "with instance:node_cpu:rate:sum query and deviation thresholds",
			args: &LowNodeUtilizationArgs{
				UseDeviationThresholds: true,
				Thresholds:             api.ResourceThresholds{MetricResource: 10},
				TargetThresholds:       api.ResourceThresholds{MetricResource: 10},
				MetricsUtilization: &MetricsUtilization{
					Source: api.PrometheusMetrics,
					Prometheus: &Prometheus{
						Query: "instance:node_cpu:rate:sum",
					},
				},
			},
			samples: model.Vector{
				sample("instance:node_cpu:rate:sum", n1NodeName, 1),
				sample("instance:node_cpu:rate:sum", n2NodeName, 0.5),
				sample("instance:node_cpu:rate:sum", n3NodeName, 0),
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
				test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
			},
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
				// These won't be evicted.
				test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
				test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
				test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
			},
			expectedPodsEvicted: 1,
		},
	}

	for _, tc := range testCases {
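Assuming the same deviation semantics sketched above, the arithmetic behind the new Prometheus case works out as follows: the mean of the sampled utilizations is (1.0 + 0.5 + 0)/3 = 0.5, so a 10-point deviation on each side gives bounds of 0.4 and 0.6. n1 (1.0) falls above the upper bound and n3 (0) below the lower one, while n2 (0.5) sits within bounds, so the single expected eviction comes from n1.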