
Do not collect the metrics when the metrics server is not enabled

Author: Jan Chaloupka
Date:   2021-12-10 15:38:17 +01:00
Parent: e0f086ff85
Commit: 901a16ecbc
16 changed files with 36 additions and 10 deletions
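
In short: PodEvictor gains a metricsEnabled field, set from the inverse of the server's DisableMetrics option (!rs.DisableMetrics), and every update of the PodsEvicted counter in EvictPod is now guarded by it, so nothing is recorded when the metrics server is disabled. The diff shown here also drops the dryRun parameter (and its early return) from the internal evictPod helper, and every test that constructs a PodEvictor passes an extra trailing false for the new parameter.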

@@ -271,6 +271,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 evictSystemCriticalPods,
 ignorePvcPods,
 evictBarePods,
+!rs.DisableMetrics,
 )
 for name, strategy := range deschedulerPolicy.Strategies {
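
The call site above passes the inverse of the server's DisableMetrics option as the new trailing argument. Below is a minimal, self-contained sketch of that wiring, assuming nothing beyond the DisableMetrics field and the new metricsEnabled parameter visible in this diff; the trimmed-down types and constructor are illustrative, not the real ones.

```go
package main

import "fmt"

// DeschedulerServer stands in for the real options struct; only the
// DisableMetrics field is taken from the diff above.
type DeschedulerServer struct {
	DisableMetrics bool
}

// PodEvictor is reduced to the single field added by this commit.
type PodEvictor struct {
	metricsEnabled bool
}

// NewPodEvictor is trimmed to the new trailing parameter; the real constructor
// takes many more arguments (client, eviction limits, policy flags, ...).
func NewPodEvictor(metricsEnabled bool) *PodEvictor {
	return &PodEvictor{metricsEnabled: metricsEnabled}
}

func main() {
	rs := &DeschedulerServer{DisableMetrics: true}
	// As in the hunk above: metrics are recorded only when the metrics
	// server is enabled, i.e. when DisableMetrics is false.
	pe := NewPodEvictor(!rs.DisableMetrics)
	fmt.Println("metrics enabled:", pe.metricsEnabled)
}
```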

@@ -61,6 +61,7 @@ type PodEvictor struct {
 evictLocalStoragePods bool
 evictSystemCriticalPods bool
 ignorePvcPods bool
+metricsEnabled bool
 }
 
 func NewPodEvictor(
@@ -74,6 +75,7 @@ func NewPodEvictor(
 evictSystemCriticalPods bool,
 ignorePvcPods bool,
 evictFailedBarePods bool,
+metricsEnabled bool,
 ) *PodEvictor {
 var nodePodCount = make(nodePodEvictedCount)
 var namespacePodCount = make(namespacePodEvictCount)
@@ -95,6 +97,7 @@ func NewPodEvictor(
 evictSystemCriticalPods: evictSystemCriticalPods,
 evictFailedBarePods: evictFailedBarePods,
 ignorePvcPods: ignorePvcPods,
+metricsEnabled: metricsEnabled,
 }
 }
@@ -121,20 +124,26 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
 reason += " (" + strings.Join(reasons, ", ") + ")"
 }
 
 if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[node]+1 > *pe.maxPodsToEvictPerNode {
-metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+if pe.metricsEnabled {
+metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+}
 return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", *pe.maxPodsToEvictPerNode, node.Name)
 }
 
 if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
-metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+if pe.metricsEnabled {
+metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+}
 return false, fmt.Errorf("Maximum number %v of evicted pods per %q namespace reached", *pe.maxPodsToEvictPerNamespace, pod.Namespace)
 }
 
-err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
+err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
 if err != nil {
 // err is used only for logging purposes
 klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
-metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+if pe.metricsEnabled {
+metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+}
 return false, nil
 }
@@ -149,15 +158,14 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
 eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
 r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
 r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
-metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+if pe.metricsEnabled {
+metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+}
 }
 return true, nil
 }
 
-func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) error {
-if dryRun {
-return nil
-}
+func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
 deleteOptions := &metav1.DeleteOptions{}
 // GracePeriodSeconds ?
 eviction := &policy.Eviction{
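
Each metrics.PodsEvicted.With(...).Inc() call in EvictPod is now wrapped in the same if pe.metricsEnabled check. Below is a runnable sketch of that pattern, using a plain prometheus/client_golang counter vector as a stand-in for the descheduler's metrics package; the label names come from the diff, while the metric name and the recordEvictionResult helper are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// podsEvicted mirrors the label set used by metrics.PodsEvicted in the diff;
// the metric name here is made up for the sketch.
var podsEvicted = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "pods_evicted_sketch",
		Help: "Evicted pods by result, strategy, namespace and node.",
	},
	[]string{"result", "strategy", "namespace", "node"},
)

type podEvictor struct {
	metricsEnabled bool
}

// recordEvictionResult is a hypothetical helper showing the guard this commit
// introduces: the counter is only touched when metrics collection is enabled.
func (pe *podEvictor) recordEvictionResult(result, strategy, namespace, node string) {
	if !pe.metricsEnabled {
		return
	}
	podsEvicted.With(prometheus.Labels{
		"result": result, "strategy": strategy, "namespace": namespace, "node": node,
	}).Inc()
}

func main() {
	enabled := &podEvictor{metricsEnabled: true}   // e.g. !rs.DisableMetrics == true
	disabled := &podEvictor{metricsEnabled: false} // metrics server disabled

	enabled.recordEvictionResult("success", "RemoveDuplicates", "default", "node-1")
	disabled.recordEvictionResult("success", "RemoveDuplicates", "default", "node-2")

	// Only the eviction recorded by the metrics-enabled evictor shows up.
	fmt.Println(testutil.ToFloat64(podsEvicted.With(prometheus.Labels{
		"result": "success", "strategy": "RemoveDuplicates", "namespace": "default", "node": "node-1",
	}))) // 1
}
```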

@@ -62,7 +62,7 @@ func TestEvictPod(t *testing.T) {
 fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
 return true, &v1.PodList{Items: test.pods}, nil
 })
-got := evictPod(ctx, fakeClient, test.pod, "v1", false)
+got := evictPod(ctx, fakeClient, test.pod, "v1")
 if got != test.want {
 t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
 }
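
The test above only drops the dryRun argument to match the new evictPod signature. In the strategy tests that follow, the single added false, is the new trailing metricsEnabled argument to NewPodEvictor (the parameter order matches the constructor hunk earlier: ..., ignorePvcPods, evictFailedBarePods, metricsEnabled), so the unit tests never record PodsEvicted metrics.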

@@ -301,6 +301,7 @@ func TestFindDuplicatePods(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
@@ -727,6 +728,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)

@@ -265,6 +265,7 @@ func TestRemoveFailedPods(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)

@@ -226,6 +226,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)

@@ -263,6 +263,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 tc.evictSystemCriticalPods,
 false,
 false,
+false,
 )
 strategy := api.DeschedulerStrategy{

@@ -467,6 +467,7 @@ func TestHighNodeUtilization(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 strategy := api.DeschedulerStrategy{
@@ -671,6 +672,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)

@@ -724,6 +724,7 @@ func TestLowNodeUtilization(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 strategy := api.DeschedulerStrategy{
@@ -1036,6 +1037,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)

@@ -213,6 +213,7 @@ func TestPodAntiAffinity(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 strategy := api.DeschedulerStrategy{
 Params: &api.StrategyParameters{

@@ -302,6 +302,7 @@ func TestPodLifeTime(t *testing.T) {
 false,
 tc.ignorePvcPods,
 false,
+false,
 )
 PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)

@@ -238,6 +238,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)

@@ -906,6 +906,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 false,
 false,
 false,
+false,
 )
 RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
 podsEvicted := podEvictor.TotalEvicted()