Mirror of https://github.com/kubernetes-sigs/descheduler.git
Change klog to use structured logging
Signed-off-by: Ali Farah <aliyfarah9@gmail.com>
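The diff below applies the klog structured-logging migration pattern: printf-style Infof/Errorf calls become InfoS/ErrorS with a constant message plus key/value pairs, errors move out of the format string into ErrorS's first argument, and object references go through klog.KObj so they render as namespace/name. A minimal sketch of the pattern, assuming k8s.io/klog/v2 and the core API types (the pod here is a placeholder, not taken from the diff):

    package main

    import (
        "errors"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/klog/v2"
    )

    func main() {
        // Placeholder object, used only to illustrate the logging calls.
        pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"}}

        // Before: values interpolated into the format string.
        // (At the default -v=0 the V(1) lines are suppressed; see the note after the diff.)
        klog.V(1).Infof("Evicting pod: %v", pod.Name)

        // After: constant message plus structured key/value pairs;
        // klog.KObj renders the object as namespace/name.
        klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))

        // Errorf becomes ErrorS, with the error as the first argument
        // instead of a value in the message.
        err := errors.New("eviction failed")
        klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
    }

With klog's default text output the structured calls print the constant message in quotes followed by key="value" pairs, so messages stay greppable while the variable data remains machine-parseable.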
@@ -97,7 +97,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 wait.Until(func() {
 nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector, stopChannel)
 if err != nil {
-klog.V(1).Infof("Unable to get ready nodes: %v", err)
+klog.V(1).InfoS("Unable to get ready nodes", "err", err)
 close(stopChannel)
 return
 }
@@ -77,7 +77,7 @@ func IsReady(node *v1.Node) bool {
 // - NodeOutOfDisk condition status is ConditionFalse,
 // - NodeNetworkUnavailable condition status is ConditionFalse.
 if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
-klog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+klog.V(1).InfoS("Ignoring node", "node", klog.KObj(node), "condition", cond.Type, "status", cond.Status)
 return false
 } /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
 klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
@@ -112,7 +112,7 @@ func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
 continue
 }
 if !IsNodeUnschedulable(node) {
-klog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
+klog.V(2).InfoS("Pod can possibly be scheduled on a different node", "pod", klog.KObj(pod), "node", klog.KObj(node))
 return true
 }
 }
@@ -130,10 +130,10 @@ func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
 }
 
 if !ok {
-klog.V(2).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
+klog.V(2).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
 return false
 }
 
-klog.V(2).Infof("Pod %v fits on node %v", pod.Name, node.Name)
+klog.V(2).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
 return true
 }
@@ -63,17 +63,17 @@ func RemoveDuplicatePods(
 }
 thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 if err != nil {
-klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
+klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
 return
 }
 
 evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
 
 for _, node := range nodes {
-klog.V(1).Infof("Processing node: %#v", node.Name)
+klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(evictable.IsEvictable))
 if err != nil {
-klog.Errorf("error listing evictable pods on node %s: %+v", node.Name, err)
+klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
 continue
 }
 
@@ -131,7 +131,7 @@ func RemoveDuplicatePods(
 
 for _, pod := range duplicatePods {
 if _, err := podEvictor.EvictPod(ctx, pod, node, "RemoveDuplicatePods"); err != nil {
-klog.Errorf("Error evicting pod: (%#v)", err)
+klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 break
 }
 }
@@ -71,7 +71,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 }
 thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 if err != nil {
-klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
+klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
 return
 }
 
@@ -113,12 +113,12 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 }
 
 if len(lowNodes) == len(nodes) {
-klog.V(1).Infof("all nodes are underutilized, nothing to do here")
+klog.V(1).Infof("All nodes are underutilized, nothing to do here")
 return
 }
 
 if len(targetNodes) == 0 {
-klog.V(1).Infof("all nodes are under target utilization, nothing to do here")
+klog.V(1).Infof("All nodes are under target utilization, nothing to do here")
 return
 }
 
@@ -136,7 +136,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 podEvictor,
 evictable.IsEvictable)
 
-klog.V(1).Infof("Total number of pods evicted: %v", podEvictor.TotalEvicted())
+klog.V(1).InfoS("Total number of pods evicted", "evictedPods", podEvictor.TotalEvicted())
 }
 
 // validateStrategyConfig checks if the strategy's config is valid
@@ -194,13 +194,14 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
 }
 // Check if node is underutilized and if we can schedule pods on it.
 if !nodeutil.IsNodeUnschedulable(node) && isNodeWithLowUtilization(usage, thresholds) {
-klog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
+klog.V(2).InfoS("Node is underutilized", "node", klog.KObj(node), "usage", usage)
 lowNodes = append(lowNodes, nuMap)
 } else if isNodeAboveTargetUtilization(usage, targetThresholds) {
-klog.V(2).Infof("Node %#v is over utilized with usage: %#v", node.Name, usage)
+klog.V(2).InfoS("Node is overutilized", "node", klog.KObj(node), "usage", usage)
 targetNodes = append(targetNodes, nuMap)
 } else {
-klog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)
+klog.V(2).InfoS("Node is appropriately utilized", "node", klog.KObj(node), "usage", usage)
+
 }
 }
 return lowNodes, targetNodes
@@ -241,21 +242,20 @@ func evictPodsFromTargetNodes(
 totalMem += ((float64(memPercentage) * float64(nodeCapacity.Memory().Value())) / 100)
 }
 
-klog.V(1).Infof("Total capacity to be moved: CPU:%v, Mem:%v, Pods:%v", totalCPU, totalMem, totalPods)
-klog.V(1).Infof("********Number of pods evicted from each node:***********")
+klog.V(1).InfoS("Total capacity to be moved", "CPU", totalCPU, "Mem", totalMem, "Pods", totalPods)
 
 for _, node := range targetNodes {
 nodeCapacity := node.node.Status.Capacity
 if len(node.node.Status.Allocatable) > 0 {
 nodeCapacity = node.node.Status.Allocatable
 }
-klog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
+klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)
 
 nonRemovablePods, removablePods := classifyPods(node.allPods, podFilter)
-klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, removablePods:%v", len(node.allPods), len(nonRemovablePods), len(removablePods))
+klog.V(2).Infof("AllPods:%v, nonRemovablePods:%v, removablePods:%v", len(node.allPods), len(nonRemovablePods), len(removablePods))
 
 if len(removablePods) == 0 {
-klog.V(1).Infof("no removable pods on node %#v, try next node", node.node.Name)
+klog.V(1).InfoS("No removable pods on node, try next node", "node", klog.KObj(node.node))
 continue
 }
 
@@ -263,8 +263,7 @@ func evictPodsFromTargetNodes(
 // sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
 podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
 evictPods(ctx, removablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
 
-klog.V(1).Infof("%v pods evicted from node %#v with usage %v", podEvictor.NodeEvicted(node.node), node.node.Name, node.usage)
+klog.V(1).InfoS("Evicted pods from node", "node", klog.KObj(node.node), "evictedPods", podEvictor.NodeEvicted(node.node), "usage", node.usage)
 }
 }
@@ -285,7 +284,8 @@ func evictPods(
 onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
 for _, pod := range inputPods {
 if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
-klog.V(3).Infof("Skipping eviction for Pod: %#v, doesn't tolerate node taint", pod.Name)
+klog.V(3).InfoS("Skipping eviction for pod, doesn't tolerate node taint", "pod", klog.KObj(pod))
+
 continue
 }
 
@@ -294,12 +294,13 @@ func evictPods(
 
 success, err := podEvictor.EvictPod(ctx, pod, node, "LowNodeUtilization")
 if err != nil {
-klog.Errorf("Error evicting pod: (%#v)", err)
+klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 break
 }
 
 if success {
-klog.V(3).Infof("Evicted pod: %#v", pod.Name)
+klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod), "err", err)
+
 // update remaining pods
 nodeUsage[v1.ResourcePods] -= onePodPercentage
 *totalPods--
@@ -312,7 +313,7 @@ func evictPods(
 *totalMem -= float64(mUsage)
 nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)
 
-klog.V(3).Infof("updated node usage: %#v", nodeUsage)
+klog.V(3).InfoS("Updated node usage", "updatedUsage", nodeUsage)
 // check if node utilization drops below target threshold or any required capacity (cpu, memory, pods) is moved
 if !isNodeAboveTargetUtilization(nodeUsage, targetThresholds) || *totalPods <= 0 || *totalCPU <= 0 || *totalMem <= 0 {
 break
@@ -54,7 +54,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 }
 thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 if err != nil {
-klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
+klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
 return
 }
 
@@ -67,12 +67,12 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
 
 for _, nodeAffinity := range strategy.Params.NodeAffinityType {
-klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)
+klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
 
 switch nodeAffinity {
 case "requiredDuringSchedulingIgnoredDuringExecution":
 for _, node := range nodes {
-klog.V(1).Infof("Processing node: %#v\n", node.Name)
+klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 
 pods, err := podutil.ListPodsOnANode(
 ctx,
@@ -92,9 +92,9 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 
 for _, pod := range pods {
 if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
-klog.V(1).Infof("Evicting pod: %v", pod.Name)
+klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
 if _, err := podEvictor.EvictPod(ctx, pod, node, "NodeAffinity"); err != nil {
-klog.Errorf("Error evicting pod: (%#v)", err)
+klog.ErrorS(err, "Error evicting pod")
 break
 }
 }
@@ -61,14 +61,14 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 
 thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 if err != nil {
-klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
+klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
 return
 }
 
 evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
 
 for _, node := range nodes {
-klog.V(1).Infof("Processing node: %#v\n", node.Name)
+klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 pods, err := podutil.ListPodsOnANode(
 ctx,
 client,
@@ -88,9 +88,9 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 node.Spec.Taints,
 func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
 ) {
-klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name)
+klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
 if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
-klog.Errorf("Error evicting pod: (%#v)", err)
+klog.ErrorS(err, "Error evicting pod")
 break
 }
 }
@@ -62,14 +62,14 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 
 thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 if err != nil {
-klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
+klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
 return
 }
 
 evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
 
 for _, node := range nodes {
-klog.V(1).Infof("Processing node: %#v\n", node.Name)
+klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 pods, err := podutil.ListPodsOnANode(
 ctx,
 client,
@@ -88,7 +88,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 if checkPodsWithAntiAffinityExist(pods[i], pods) {
 success, err := podEvictor.EvictPod(ctx, pods[i], node, "InterPodAntiAffinity")
 if err != nil {
-klog.Errorf("Error evicting pod: (%#v)", err)
+klog.ErrorS(err, "Error evicting pod")
 break
 }
 
@@ -56,7 +56,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 
 thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 if err != nil {
-klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
+klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
 return
 }
 
@@ -69,17 +69,17 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
 
 for _, node := range nodes {
-klog.V(1).Infof("Processing node: %#v", node.Name)
+klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 
 pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, *strategy.Params.MaxPodLifeTimeSeconds, evictable.IsEvictable)
 for _, pod := range pods {
 success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
 if success {
-klog.V(1).Infof("Evicted pod: %#v because it was created more than %v seconds ago", pod.Name, *strategy.Params.MaxPodLifeTimeSeconds)
+klog.V(1).InfoS("Evicted pod because it exceeded its lifetime", "pod", klog.KObj(pod), "maxPodLifeTime", *strategy.Params.MaxPodLifeTimeSeconds)
 }
 
 if err != nil {
-klog.Errorf("Error evicting pod: (%#v)", err)
+klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 break
 }
 }
@@ -57,7 +57,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 
 thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 if err != nil {
-klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
+klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
 return
 }
 
@@ -70,7 +70,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
 
 for _, node := range nodes {
-klog.V(1).Infof("Processing node: %s", node.Name)
+klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
 pods, err := podutil.ListPodsOnANode(
 ctx,
 client,
@@ -80,7 +80,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 podutil.WithoutNamespaces(excludedNamespaces),
 )
 if err != nil {
-klog.Errorf("Error when list pods at node %s", node.Name)
+klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))
 continue
 }
 
@@ -94,7 +94,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 continue
 }
 if _, err := podEvictor.EvictPod(ctx, pods[i], node, "TooManyRestarts"); err != nil {
-klog.Errorf("Error evicting pod: (%#v)", err)
+klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 break
 }
 }
@@ -186,7 +186,7 @@ func PodToleratesTaints(pod *v1.Pod, taintsOfNodes map[string][]v1.Taint) bool {
 if len(pod.Spec.Tolerations) >= len(taintsForNode) && TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taintsForNode, nil) {
 return true
 }
-klog.V(5).Infof("pod: %#v doesn't tolerate node %s's taints", pod.Name, nodeName)
+klog.V(5).InfoS("Pod doesn't tolerate nodes taint", "pod", klog.KObj(pod), "nodeName", nodeName)
 }
 
 return false
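The verbosity levels on these calls are unchanged by the migration, so the same messages appear at the same -v settings as before. A small self-contained sketch (assuming only k8s.io/klog/v2; the node value is a placeholder) of how the V-levels used throughout this commit interact with the -v flag:

    package main

    import (
        "flag"

        "k8s.io/klog/v2"
    )

    func main() {
        // klog still registers its flags (including -v) the usual way;
        // here the level is set programmatically instead of via the command line.
        klog.InitFlags(nil)
        _ = flag.Set("v", "3")
        defer klog.Flush()

        klog.V(1).InfoS("Processing node", "node", "node-1") // emitted at -v=3
        klog.V(5).InfoS("Pod doesn't tolerate nodes taint")  // suppressed at -v=3
    }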