Mirror of https://github.com/kubernetes-sigs/descheduler.git
Synced 2026-01-26 05:14:13 +01:00
Merge pull request #407 from ingvagabund/structured-klog
Flip Info/Infof/Error to InfoS/ErrorS
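
The change is mechanical but worth illustrating before reading the hunks. Below is a minimal sketch (mine, not taken from this commit) of the before/after pattern, assuming k8s.io/klog/v2 and the core v1 types used throughout the diff:

package main

import (
	"errors"
	"flag"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil) // register klog flags on flag.CommandLine
	flag.Set("v", "4")  // enable V(4) output for the demo
	flag.Parse()
	defer klog.Flush()

	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "worker-1"}}

	// Before: printf-style message, variables interpolated into the string.
	klog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)

	// After: constant message plus key/value pairs; klog.KObj(node) renders
	// the object's namespace/name rather than a raw string.
	klog.V(4).InfoS("Ignoring node since it is unschedulable", "node", klog.KObj(node))

	// Errors move from klog.Error(err) to klog.ErrorS(err, msg, kv...):
	// the error becomes the first argument and the message stays constant.
	klog.ErrorS(errors.New("no matching nodes"), "Failed to match node selector")
}

The structured variants keep the message constant and move the variable parts into key/value pairs, which log processors can index without parsing printf output.
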
@@ -18,7 +18,8 @@ package node
 import (
 	"context"
-	"k8s.io/api/core/v1"
+
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	coreinformers "k8s.io/client-go/informers/core/v1"

@@ -80,16 +81,16 @@ func IsReady(node *v1.Node) bool {
-			klog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+			klog.V(1).InfoS("Ignoring node", "node", klog.KObj(node), "condition", cond.Type, "status", cond.Status)
 			return false
 		} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
-			klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+			klog.V(4).InfoS("Ignoring node with condition status", "node", klog.KObj(node.Name), "condition", cond.Type, "status", cond.Status)
 			return false
 		} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
-			klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+			klog.V(4).InfoS("Ignoring node with condition status", "node", klog.KObj(node.Name), "condition", cond.Type, "status", cond.Status)
 			return false
 		}*/
 	}
 	// Ignore nodes that are marked unschedulable
 	/*if node.Spec.Unschedulable {
-		klog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)
+		klog.V(4).InfoS("Ignoring node since it is unschedulable", "node", klog.KObj(node.Name))
 		return false
 	}*/
 	return true

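One detail worth flagging in this hunk: klog.KObj takes the object itself (anything with GetName/GetNamespace methods), not a name string, so the commented-out replacements above, which pass node.Name, would not compile if that dead block were ever re-enabled. The live call at the top of the hunk gets it right. A sketch only, not part of the commit, of the compiling form for the commented-out branch, reusing the surrounding node and cond variables:

// Pass the node, not node.Name: a string does not satisfy
// klog's GetName/GetNamespace interface.
klog.V(4).InfoS("Ignoring node with condition status",
	"node", klog.KObj(node),
	"condition", cond.Type,
	"status", cond.Status)
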
@@ -125,7 +126,7 @@ func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
 	ok, err := utils.PodMatchNodeSelector(pod, node)
 	if err != nil {
-		klog.Error(err)
+		klog.ErrorS(err, "Failed to match node selector")
 		return false
 	}

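The ErrorS variant used here takes the error as its first argument, then a constant message, then optional key/value context; compared with klog.Error(err), the message stays grep-able and the error becomes a structured field. A sketch of the shape with extra context keys (the "pod" and "node" pairs are my addition, not in the commit):

ok, err := utils.PodMatchNodeSelector(pod, node)
if err != nil {
	// Error first, constant message second, key/value pairs after.
	klog.ErrorS(err, "Failed to match node selector",
		"pod", klog.KObj(pod), "node", klog.KObj(node))
	return false
}
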
@@ -62,7 +62,7 @@ func RemoveDuplicatePods(
 	podEvictor *evictions.PodEvictor,
 ) {
 	if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
-		klog.V(1).Info(err)
+		klog.V(1).InfoS("Invalid RemoveDuplicatePods parameters", "err", err)
 		return
 	}
 	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)

@@ -69,7 +69,7 @@ func validateLowNodeUtilizationParams(params *api.StrategyParameters) error {
 func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
 	if err := validateLowNodeUtilizationParams(strategy.Params); err != nil {
-		klog.V(1).Info(err)
+		klog.V(1).InfoS("Invalid LowNodeUtilization parameters", "err", err)
 		return
 	}
 	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)

@@ -49,7 +49,7 @@ func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) err
 // RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
 func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
-		klog.V(1).Info(err)
+		klog.V(1).InfoS("Invalid RemovePodsViolatingNodeAffinity parameters", "err", err)
 		return
 	}
 	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)

@@ -49,7 +49,7 @@ func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters)
 // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
 func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
-		klog.V(1).Info(err)
+		klog.V(1).InfoS("Invalid RemovePodsViolatingNodeTaints parameters", "err", err)
 		return
 	}

@@ -50,7 +50,7 @@ func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyP
 // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
 func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
-		klog.V(1).Info(err)
+		klog.V(1).InfoS("Invalid RemovePodsViolatingInterPodAntiAffinity parameters", "err", err)
 		return
 	}

@@ -112,7 +112,7 @@ func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
 			namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, &term)
 			selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
 			if err != nil {
-				klog.Infof("%v", err)
+				klog.InfoS("Unable to convert LabelSelector into Selector", "err", err)
 				return false
 			}
 			for _, existingPod := range pods {

@@ -58,7 +58,7 @@ func validatePodLifeTimeParams(params *api.StrategyParameters) error {
 // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
 func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	if err := validatePodLifeTimeParams(strategy.Params); err != nil {
-		klog.V(1).Info(err)
+		klog.V(1).InfoS("Invalid PodLifeTime parameters", "err", err)
 		return
 	}

@@ -51,7 +51,7 @@ func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameter
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
 func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
-		klog.V(1).Info(err)
+		klog.V(1).InfoS("Invalid RemovePodsHavingTooManyRestarts parameters", "err", err)
 		return
 	}

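For reference, klog's default text logger renders these structured calls roughly as below; the header fields (timestamp, PID, file:line) vary by environment and klog version, so treat this as an illustration rather than captured output:

I0101 12:00:00.000000       1 node.go:84] "Ignoring node" node="worker-1" condition="Ready" status="False"
E0101 12:00:00.000000       1 node.go:129] "Failed to match node selector" err="no matching nodes"

Because every strategy now logs a constant "Invalid <strategy> parameters" message with an err field, a single grep over the message string finds all parameter-validation failures across strategies.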