
Convert logs to use structured logs

Ali Farah
2020-09-11 01:02:55 +10:00
parent e37c27313e
commit 6329b6c27b
8 changed files with 17 additions and 17 deletions
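
Every change in this commit follows the same recipe: a printf-style call (klog.Infof, klog.Errorf) becomes its structured counterpart (klog.InfoS, klog.ErrorS), which takes a constant message followed by alternating key/value pairs, with any error promoted to a first-class argument. A minimal before/after sketch of the pattern (the messages, pod, and error value here are illustrative, not lines from this diff):

package main

import (
	"errors"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "nginx", Namespace: "default"}}
	err := errors.New("pod disruption budget violated") // illustrative error

	// Before: data interpolated into the message string.
	klog.Errorf("error evicting pod %s in namespace %s: %v", pod.Name, pod.Namespace, err)

	// After: constant message, error as a first-class argument, context as
	// alternating key/value pairs; klog.KObj renders "default/nginx".
	klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
	klog.Flush()
}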


@@ -43,7 +43,7 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
defer logs.FlushLogs()
err := Run(s)
if err != nil {
klog.Errorf("%v", err)
klog.ErrorS(err, "descheduler server")
}
},
}


@@ -103,7 +103,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
}
if len(nodes) <= 1 {
klog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
close(stopChannel)
return
}


@@ -106,7 +106,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
if err != nil {
// err is used only for logging purposes
klog.Errorf("Error evicting pod: %#v in namespace %#v%s: %#v", pod.Name, pod.Namespace, reason, err)
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
return false, nil
}
@@ -227,7 +227,7 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
}
if len(checkErrs) > 0 && !HaveEvictAnnotation(pod) {
klog.V(4).Infof("Pod %s in namespace %s is not evictable: Pod lacks an eviction annotation and fails the following checks: %v", pod.Name, pod.Namespace, errors.NewAggregate(checkErrs).Error())
klog.V(4).InfoS("Pod lacks an eviction annotation and fails the following checks", "pod", klog.KObj(pod), "checks", errors.NewAggregate(checkErrs).Error())
return false
}
return true
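
The checks value logged above comes from errors.NewAggregate (k8s.io/apimachinery/pkg/util/errors), which folds a slice of errors into a single error whose message concatenates the individual failures. A small sketch of how the aggregated string ends up in the structured entry (the check messages are made up):

package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	checkErrs := []error{
		fmt.Errorf("pod is a mirror pod"),
		fmt.Errorf("pod has local storage"),
	}
	// NewAggregate joins the errors; Error() yields
	// "[pod is a mirror pod, pod has local storage]".
	fmt.Println(utilerrors.NewAggregate(checkErrs).Error())
}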


@@ -42,7 +42,7 @@ func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer co
}
if len(nodes) == 0 {
klog.V(2).Infof("node lister returned empty list, now fetch directly")
klog.V(2).InfoS("Node lister returned empty list, now fetch directly")
nItems, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: nodeSelector})
if err != nil {
@@ -80,7 +80,7 @@ func IsReady(node *v1.Node) bool {
klog.V(1).InfoS("Ignoring node", "node", klog.KObj(node), "condition", cond.Type, "status", cond.Status)
return false
} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
klog.V(4).InfoS("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
return false
} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)


@@ -30,7 +30,7 @@ import (
func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
if policyConfigFile == "" {
klog.V(1).Infof("policy config file not specified")
klog.V(1).InfoS("Policy config file not specified")
return nil, nil
}
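
Note that the conversion preserves each call's verbosity level: klog.V(1).InfoS is still emitted only when the process runs with -v=1 or higher. A quick runnable check (the flag value is chosen arbitrarily for illustration):

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil) // registers -v and friends on the default flag set
	flag.Set("v", "1")
	flag.Parse()

	klog.V(1).InfoS("Policy config file not specified") // printed: verbosity >= 1
	klog.V(2).InfoS("Node lister returned empty list")  // suppressed at -v=1
	klog.Flush()
}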


@@ -78,7 +78,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
if err := validateStrategyConfig(thresholds, targetThresholds); err != nil {
klog.Errorf("LowNodeUtilization config is not valid: %v", err)
klog.ErrorS(err, "LowNodeUtilization config is not valid")
return
}
// check if Pods/CPU/Mem are set, if not, set them to 100
@@ -102,23 +102,23 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
"CPU", thresholds[v1.ResourceCPU], "Mem", thresholds[v1.ResourceMemory], "Pods", thresholds[v1.ResourcePods])
if len(lowNodes) == 0 {
klog.V(1).Infof("No node is underutilized, nothing to do here, you might tune your thresholds further")
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
return
}
klog.V(1).InfoS("Total number of underutilized nodes", "totalNumber", len(lowNodes))
if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
klog.V(1).Infof("number of nodes underutilized (%v) is less than NumberOfNodes (%v), nothing to do here", len(lowNodes), strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
return
}
if len(lowNodes) == len(nodes) {
klog.V(1).Infof("All nodes are underutilized, nothing to do here")
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
return
}
if len(targetNodes) == 0 {
klog.V(1).Infof("All nodes are under target utilization, nothing to do here")
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
return
}
@@ -252,14 +252,14 @@ func evictPodsFromTargetNodes(
klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)
nonRemovablePods, removablePods := classifyPods(node.allPods, podFilter)
klog.V(2).Infof("AllPods:%v, nonRemovablePods:%v, removablePods:%v", len(node.allPods), len(nonRemovablePods), len(removablePods))
klog.V(2).InfoS("AllPods", len(node.allPods), "nonRemovablePods", len(nonRemovablePods), "removablePods", len(removablePods))
if len(removablePods) == 0 {
klog.V(1).InfoS("No removable pods on node, try next node", "node", klog.KObj(node.node))
continue
}
klog.V(1).Infof("evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
evictPods(ctx, removablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
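
A pitfall worth flagging for conversions like the pod-count line above: InfoS expects a constant message first, then strictly alternating string keys and values; putting a value where a key belongs breaks the pair alignment and yields malformed output. A sketch of the rule (the counts are illustrative):

package main

import "k8s.io/klog/v2"

func main() {
	allPods, nonRemovable, removable := 10, 3, 7

	// Wrong: 10 lands in a key position, so every following pair shifts.
	// klog.V(2).InfoS("AllPods", allPods, "nonRemovablePods", nonRemovable)

	// Right: message first, then key/value, key/value, ...
	klog.V(2).InfoS("Pod counts on node",
		"allPods", allPods, "nonRemovablePods", nonRemovable, "removablePods", removable)
	klog.Flush()
}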


@@ -87,7 +87,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
podutil.WithoutNamespaces(excludedNamespaces),
)
if err != nil {
klog.Errorf("failed to get pods from %v: %v", node.Name, err)
klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))
}
for _, pod := range pods {
@@ -101,7 +101,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
}
}
default:
klog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
}
}
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())


@@ -83,7 +83,7 @@ func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSele
for _, req := range nodeSelectorTerms {
nodeSelector, err := NodeSelectorRequirementsAsSelector(req.MatchExpressions)
if err != nil {
-klog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
+klog.V(10).InfoS("Failed to parse MatchExpressions", "matchExpression", req.MatchExpressions)
return false
}
if nodeSelector.Matches(labels.Set(node.Labels)) {
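
One detail from the nodeAffinity hunk above: klog.ErrorS accepts a nil error, which logs at error severity without attaching an err field, the usual way to report an invalid state when there is no error value in hand. A minimal sketch (the nodeAffinity string is illustrative):

package main

import "k8s.io/klog/v2"

func main() {
	nodeAffinity := "requiredDuringSchedulingRequiredDuringExecution"
	// With a nil error klog omits the err key; the entry still goes out
	// at error severity with the message and key/value pairs.
	klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
	klog.Flush()
}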