Mirror of https://github.com/kubernetes-sigs/descheduler.git

Convert logs to use structured logging

Author: Ali Farah
Date: 2020-09-05 01:30:59 +10:00
parent e6f1c6f78a
commit e37c27313e
4 changed files with 10 additions and 10 deletions
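
The pattern applied throughout the diff below is the Kubernetes structured-logging migration: klog.Infof calls that interpolate values into a format string become klog.InfoS calls that take a constant message plus alternating key/value pairs, with klog.KObj rendering an object reference as namespace/name. A minimal standalone sketch of that conversion (not code from this commit; the pod and reason values are made up for illustration):

package main

import (
	"flag"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("v", "1") // enable V(1) output so the demo lines are printed
	flag.Parse()
	defer klog.Flush()

	// Placeholder values, for illustration only.
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "nginx", Namespace: "default"}}
	reason := " (example reason)"

	// Old style: values interpolated into a free-form format string.
	klog.V(1).Infof("Evicted pod: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)

	// Structured style: constant message plus key/value pairs;
	// klog.KObj(pod) renders the object reference as "namespace/name".
	klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason)
}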

View File

@@ -112,9 +112,9 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
 	pe.nodepodCount[node]++
 	if pe.dryRun {
-		klog.V(1).Infof("Evicted pod in dry run mode: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
+		klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason)
 	} else {
-		klog.V(1).Infof("Evicted pod: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
+		klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason)
 		eventBroadcaster := record.NewBroadcaster()
 		eventBroadcaster.StartLogging(klog.V(3).Infof)
 		eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})

View File

@@ -98,14 +98,14 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 	npm := createNodePodsMap(ctx, client, nodes)
 	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
-	klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
-		thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
+	klog.V(1).InfoS("Criteria for a node under utilization",
+		"CPU", thresholds[v1.ResourceCPU], "Mem", thresholds[v1.ResourceMemory], "Pods", thresholds[v1.ResourcePods])
 	if len(lowNodes) == 0 {
 		klog.V(1).Infof("No node is underutilized, nothing to do here, you might tune your thresholds further")
 		return
 	}
-	klog.V(1).Infof("Total number of underutilized nodes: %v", len(lowNodes))
+	klog.V(1).InfoS("Total number of underutilized nodes", "totalNumber", len(lowNodes))
 	if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
 		klog.V(1).Infof("number of nodes underutilized (%v) is less than NumberOfNodes (%v), nothing to do here", len(lowNodes), strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
@@ -122,10 +122,10 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 		return
 	}
-	klog.V(1).Infof("Criteria for a node above target utilization: CPU: %v, Mem: %v, Pods: %v",
-		targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
-	klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
+	klog.V(1).InfoS("Criteria for a node above target utilization",
+		"CPU", targetThresholds[v1.ResourceCPU], "Mem", targetThresholds[v1.ResourceMemory], "Pods", targetThresholds[v1.ResourcePods])
+	klog.V(1).InfoS("Number of nodes above target utilization", "totalNumber", len(targetNodes))
 	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
 	evictPodsFromTargetNodes(

View File

@@ -104,5 +104,5 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 			klog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
 		}
 	}
-	klog.V(1).Infof("Evicted %v pods", podEvictor.TotalEvicted())
+	klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
 }

View File

@@ -70,7 +70,7 @@ func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
 		// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
 		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
 			nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
-			klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
+			klog.V(10).InfoS("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms", "terms", nodeSelectorTerms)
 			return nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
 		}
 	}
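
For illustration only (the values below are invented, not actual descheduler output): with klog's default text format, an InfoS entry keeps the message constant and appends the key/value pairs, and klog.KObj renders the pod as namespace/name, so the dry-run line from the first hunk would appear roughly as follows after the usual klog header:

"Evicted pod in dry run mode" pod="default/nginx" reason=" (example reason)"

The constant message plus named keys is what makes these entries greppable and machine-parseable, compared with matching on the old free-form format strings.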