
feature: use contextual logging for plugins

Signed-off-by: googs1025 <googs1025@gmail.com>
googs1025
2025-03-24 20:04:51 +08:00
parent 9f918371a2
commit 33894afe2b
15 changed files with 140 additions and 88 deletions
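The pattern applied across the plugins in this commit is klog's contextual logging: derive a plugin-scoped logger once in the constructor via klog.FromContext(ctx).WithValues("plugin", PluginName), store it on the plugin struct, and re-attach it to the context at each extension point so every log line carries the plugin name. A minimal self-contained sketch of that flow, assuming only k8s.io/klog/v2 (the demoPlugin type and the extension-point string are illustrative stand-ins, not the descheduler's actual framework types):

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// demoPlugin mirrors the shape used in this commit: the logger derived in the
// constructor is stored on the struct and reused at each extension point.
type demoPlugin struct {
	logger klog.Logger
}

// newDemoPlugin derives a plugin-scoped logger from the constructor's context,
// as New does in the diff below with WithValues("plugin", PluginName).
func newDemoPlugin(ctx context.Context) *demoPlugin {
	return &demoPlugin{
		logger: klog.FromContext(ctx).WithValues("plugin", "DemoPlugin"),
	}
}

// deschedule stands in for an extension point such as Deschedule: the stored
// logger is pushed back into the context with klog.NewContext, then retrieved
// and enriched, so the "plugin" key tags every log line from this point on.
func (d *demoPlugin) deschedule(ctx context.Context) {
	logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", "Deschedule")
	logger.V(2).Info("executing extension point")
}

func main() {
	ctx := context.Background()
	p := newDemoPlugin(ctx)
	p.deschedule(ctx)
}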


@@ -33,6 +33,7 @@ const PluginName = "RemovePodsViolatingNodeAffinity"
 // RemovePodsViolatingNodeAffinity evicts pods on the node which violate node affinity
 type RemovePodsViolatingNodeAffinity struct {
+	logger    klog.Logger
 	handle    frameworktypes.Handle
 	args      *RemovePodsViolatingNodeAffinityArgs
 	podFilter podutil.FilterFunc
@@ -46,6 +47,7 @@ func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeAffinityArgs, got %T", args)
 	}
+	logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
 	var includedNamespaces, excludedNamespaces sets.Set[string]
 	if nodeAffinityArgs.Namespaces != nil {
@@ -65,6 +67,7 @@ func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle)
 	}
 	return &RemovePodsViolatingNodeAffinity{
+		logger:    logger,
 		handle:    handle,
 		podFilter: podFilter,
 		args:      nodeAffinityArgs,
@@ -77,8 +80,9 @@ func (d *RemovePodsViolatingNodeAffinity) Name() string {
 }

 func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
+	logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
 	for _, nodeAffinity := range d.args.NodeAffinityType {
-		klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
+		logger.V(2).Info("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
 		var err *frameworktypes.Status = nil
 		// The pods that we'll evict must be evictable. For example, the current number of replicas
@@ -106,7 +110,7 @@ func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes
 			}
 			err = d.processNodes(ctx, nodes, filterFunc)
 		default:
-			klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
+			logger.Error(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
 		}
 		if err != nil {
@@ -118,7 +122,7 @@ func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes
 func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, nodes []*v1.Node, filterFunc func(*v1.Pod, *v1.Node, []*v1.Node) bool) *frameworktypes.Status {
 	for _, node := range nodes {
-		klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
+		d.logger.V(2).Info("Processing node", "node", klog.KObj(node))
 		// Potentially evictable pods
 		pods, err := podutil.ListPodsOnANode(
@@ -136,7 +140,7 @@ func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, node
 	loop:
 		for _, pod := range pods {
-			klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
+			d.logger.V(1).Info("Evicting pod", "pod", klog.KObj(pod))
 			err := d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
 			if err == nil {
 				continue
@@ -147,7 +151,7 @@ func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, node
 			case *evictions.EvictionTotalLimitError:
 				return nil
 			default:
-				klog.Errorf("eviction failed: %v", err)
+				d.logger.Error(err, "eviction failed")
 			}
 		}
 	}
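Beyond the context plumbing, the last hunk also moves the failure path from unstructured klog.Errorf to the structured Error method on the stored logger, so the eviction error is emitted as a key/value pair together with the plugin's attached values rather than flattened into the message string. A small sketch of the difference, again assuming only k8s.io/klog/v2:

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	err := errors.New("pod is not evictable")

	// Old style: the error is formatted into the message, and no structured
	// key/value pairs travel with it.
	klog.Errorf("eviction failed: %v", err)

	// New style: the error is a first-class argument, and the values attached
	// to the derived logger ("plugin", ...) are emitted alongside it.
	logger := klog.Background().WithValues("plugin", "RemovePodsViolatingNodeAffinity")
	logger.Error(err, "eviction failed")
}

With the structured call, log consumers can filter on the plugin key instead of parsing message strings, which is the point of threading the derived logger through every plugin.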