Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-25 20:59:28 +01:00)
Change klog info messages after a strategy is exited into error messages
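For context, a minimal, self-contained sketch (assuming k8s.io/klog/v2, which provides the klog calls in this diff) contrasting the two structured-logging calls the commit swaps: klog.V(1).InfoS logs at info severity and is dropped unless the verbosity flag is at least 1, passing the error as an ordinary "err" key/value pair, while klog.ErrorS logs at error severity unconditionally and takes the error as a dedicated first argument.

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil) // registers -v and the other klog flags
	defer klog.Flush()

	err := errors.New("strategy params are nil")

	// Before this commit: info-level, only emitted when -v=1 or higher.
	klog.V(1).InfoS("Invalid PodLifeTime parameters", "err", err)

	// After this commit: error-level, always emitted.
	klog.ErrorS(err, "Invalid PodLifeTime parameters")
}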
@@ -58,12 +58,12 @@ func RemoveDuplicatePods(
 	podEvictor *evictions.PodEvictor,
 ) {
 	if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
-		klog.V(1).InfoS("Invalid RemoveDuplicatePods parameters", "err", err)
+		klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
 		return
 	}
 	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 	if err != nil {
-		klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
 		return
 	}
@@ -69,12 +69,12 @@ func validateLowNodeUtilizationParams(params *api.StrategyParameters) error {
 func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
 	if err := validateLowNodeUtilizationParams(strategy.Params); err != nil {
-		klog.V(1).InfoS("Invalid LowNodeUtilization parameters", "err", err)
+		klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
 		return
 	}
 	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 	if err != nil {
-		klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
 		return
 	}
@@ -49,12 +49,12 @@ func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) err
 // RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
 func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
-		klog.V(1).InfoS("Invalid RemovePodsViolatingNodeAffinity parameters", "err", err)
+		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
 		return
 	}
 	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 	if err != nil {
-		klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
 		return
 	}
@@ -49,7 +49,7 @@ func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters)
 // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
 func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
-		klog.V(1).InfoS("Invalid RemovePodsViolatingNodeTaints parameters", "err", err)
+		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
 		return
 	}
@@ -61,7 +61,7 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa

 	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 	if err != nil {
-		klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
 		return
 	}
@@ -50,7 +50,7 @@ func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyP
 // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
 func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
-		klog.V(1).InfoS("Invalid RemovePodsViolatingInterPodAntiAffinity parameters", "err", err)
+		klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters")
 		return
 	}
@@ -62,7 +62,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients

 	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 	if err != nil {
-		klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
 		return
 	}
@@ -112,7 +112,7 @@ func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
 		namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, &term)
 		selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
 		if err != nil {
-			klog.InfoS("Unable to convert LabelSelector into Selector", "err", err)
+			klog.ErrorS(err, "Unable to convert LabelSelector into Selector")
 			return false
 		}
 		for _, existingPod := range pods {
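The hunk above relies on metav1.LabelSelectorAsSelector from k8s.io/apimachinery to turn the affinity term's LabelSelector into a labels.Selector; the conversion error path is what gets upgraded to klog.ErrorS. A minimal sketch of that helper in isolation (the "app" labels are made up for illustration, not taken from the commit):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A LabelSelector like one found in a pod anti-affinity term.
	ls := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "web"},
	}

	// Conversion can fail (e.g. a bad operator in MatchExpressions),
	// which is the error path the commit now logs with klog.ErrorS.
	selector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		fmt.Println("unable to convert LabelSelector:", err)
		return
	}

	fmt.Println(selector.Matches(labels.Set{"app": "web"}))   // true
	fmt.Println(selector.Matches(labels.Set{"app": "batch"})) // false
}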
@@ -58,13 +58,13 @@ func validatePodLifeTimeParams(params *api.StrategyParameters) error {
 // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
 func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	if err := validatePodLifeTimeParams(strategy.Params); err != nil {
-		klog.V(1).InfoS("Invalid PodLifeTime parameters", "err", err)
+		klog.ErrorS(err, "Invalid PodLifeTime parameters")
 		return
 	}

 	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 	if err != nil {
-		klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
 		return
 	}
@@ -51,13 +51,13 @@ func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameter
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
 func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
 	if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
-		klog.V(1).InfoS("Invalid RemovePodsHavingTooManyRestarts parameters", "err", err)
+		klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
 		return
 	}

 	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 	if err != nil {
-		klog.V(1).InfoS("Failed to get threshold priority from strategy's params", "err", err)
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
 		return
 	}
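Every strategy hunk above shares the same shape: validate the strategy params, resolve the threshold priority, and return early with a log on either failure. A condensed, runnable sketch of that pattern (the types and helpers here are illustrative stand-ins, not the descheduler's actual API):

package main

import (
	"context"
	"errors"

	"k8s.io/klog/v2"
)

// strategyParams is an illustrative stand-in for api.StrategyParameters.
type strategyParams struct {
	thresholdPriority *int32
}

func validateParams(p *strategyParams) error {
	if p == nil {
		return errors.New("strategy params are nil")
	}
	return nil
}

func getPriorityFromParams(_ context.Context, p *strategyParams) (int32, error) {
	if p.thresholdPriority == nil {
		return 0, errors.New("no threshold priority configured")
	}
	return *p.thresholdPriority, nil
}

// strategyEntry mirrors the validate-then-bail shape of each strategy;
// the commit only touches the two klog calls on the error paths.
func strategyEntry(ctx context.Context, p *strategyParams) {
	if err := validateParams(p); err != nil {
		klog.ErrorS(err, "Invalid parameters") // was klog.V(1).InfoS(..., "err", err)
		return
	}
	if _, err := getPriorityFromParams(ctx, p); err != nil {
		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
		return
	}
	// ...the strategy body would run here...
}

func main() {
	defer klog.Flush()
	strategyEntry(context.Background(), nil) // exercises the first error path
}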