Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)
Merge pull request #885 from damemi/evict-options
Add EvictOptions struct to EvictPod()
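In short, the eviction reason is no longer smuggled through the context ("evictionReason"); callers now pass an explicit EvictOptions value to PodEvictor.EvictPod. A minimal, self-contained sketch of the new call shape — fakePodEvictor and the string pod name are stand-ins for illustration only; the real method lives on PodEvictor in pkg/descheduler/evictions and takes a *v1.Pod:

```go
package main

import (
	"context"
	"fmt"
)

// EvictOptions mirrors the struct added in this commit: optional,
// per-eviction details such as a logging reason.
type EvictOptions struct {
	// Reason allows for passing details about the specific eviction for logging.
	Reason string
}

// fakePodEvictor is a stand-in for the real PodEvictor, kept here only
// to show the new method signature.
type fakePodEvictor struct{}

func (pe *fakePodEvictor) EvictPod(ctx context.Context, podName string, opts EvictOptions) bool {
	fmt.Printf("evicting %s, reason=%q\n", podName, opts.Reason)
	return true
}

func main() {
	pe := &fakePodEvictor{}
	// An empty EvictOptions keeps the previous behavior (no reason recorded),
	// which is what every call site in this diff does.
	pe.EvictPod(context.TODO(), "pod-a", EvictOptions{})
	// A strategy can now attach a human-readable reason explicitly
	// instead of passing it through the context.
	pe.EvictPod(context.TODO(), "pod-b", EvictOptions{Reason: "exceeded allowed restarts"})
}
```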
@@ -301,7 +301,20 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
			klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
			continue
		}
-		evictorFilter := evictions.NewEvictorFilter(nodes, getPodsAssignedToNode, evictLocalStoragePods, evictSystemCriticalPods, ignorePvcPods, evictBarePods, evictions.WithNodeFit(nodeFit), evictions.WithPriorityThreshold(thresholdPriority))
+		evictorFilter := evictions.NewEvictorFilter(
+			nodes,
+			getPodsAssignedToNode,
+			evictLocalStoragePods,
+			evictSystemCriticalPods,
+			ignorePvcPods,
+			evictBarePods,
+			evictions.WithNodeFit(nodeFit),
+			evictions.WithPriorityThreshold(thresholdPriority),
+		)
+		// TODO: strategyName should be accessible from within the strategy using a framework
+		// handle or function which the Evictor has access to. For migration/in-progress framework
+		// work, we are currently passing this via context. To be removed
+		// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
+		f(context.WithValue(ctx, "strategyName", string(name)), rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
	}
} else {
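The reformatted NewEvictorFilter call above follows Go's functional-options style: required arguments first, then a variadic list of option functions. A compact sketch of that shape under simplified assumptions — only the option names WithNodeFit and WithPriorityThreshold come from the diff; the evictorFilter fields, the option type, and newEvictorFilter below are hypothetical stand-ins, not the descheduler's actual implementation:

```go
package main

import "fmt"

// evictorFilter is a simplified stand-in holding the settings the
// options configure.
type evictorFilter struct {
	nodeFit           bool
	priorityThreshold int32
}

// option is the functional-option type: a function that mutates the
// filter being constructed.
type option func(*evictorFilter)

func WithNodeFit(nodeFit bool) option {
	return func(f *evictorFilter) { f.nodeFit = nodeFit }
}

func WithPriorityThreshold(p int32) option {
	return func(f *evictorFilter) { f.priorityThreshold = p }
}

// newEvictorFilter applies each option in order to a zero-valued filter.
func newEvictorFilter(opts ...option) *evictorFilter {
	f := &evictorFilter{}
	for _, o := range opts {
		o(f)
	}
	return f
}

func main() {
	f := newEvictorFilter(WithNodeFit(true), WithPriorityThreshold(1000))
	fmt.Printf("%+v\n", f)
}
```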
@@ -110,18 +110,20 @@ func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
	return false
}

+// EvictOptions provides a handle for passing additional info to EvictPod
+type EvictOptions struct {
+	// Reason allows for passing details about the specific eviction for logging.
+	Reason string
+}
+
// EvictPod evicts a pod while exercising eviction limits.
// Returns true when the pod is evicted on the server side.
-// Eviction reason can be set through the ctx's evictionReason:STRING pair
-func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod) bool {
+func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
+	// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
	strategy := ""
	if ctx.Value("strategyName") != nil {
		strategy = ctx.Value("strategyName").(string)
	}
-	reason := ""
-	if ctx.Value("evictionReason") != nil {
-		reason = ctx.Value("evictionReason").(string)
-	}

	if pod.Spec.NodeName != "" {
		if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
@@ -144,7 +146,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod) bool {
	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
	if err != nil {
		// err is used only for logging purposes
-		klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
+		klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
		if pe.metricsEnabled {
			metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
		}
@@ -161,14 +163,14 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod) bool {
	}

	if pe.dryRun {
-		klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", pod.Spec.NodeName)
+		klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
	} else {
-		klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", pod.Spec.NodeName)
+		klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
		eventBroadcaster := record.NewBroadcaster()
		eventBroadcaster.StartStructuredLogging(3)
		eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
		r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
-		r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
+		r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", opts.Reason))
	}
	return true
}
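One detail from the hunk above: opts.Reason is appended verbatim both to the log fields and to the event message built with fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", opts.Reason), so a caller that sets a reason supplies its own separator. A tiny illustration (the non-empty reason text is made up):

```go
package main

import "fmt"

func main() {
	// With an empty EvictOptions{} (as at every call site in this diff),
	// the recorded event message is unchanged from before:
	fmt.Printf("pod evicted by sigs.k8s.io/descheduler%s\n", "")
	// A hypothetical non-empty reason; the caller includes any leading
	// separator itself, since the format string has none.
	fmt.Printf("pod evicted by sigs.k8s.io/descheduler%s\n", " (pod lifetime exceeded)")
}
```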
@@ -200,7 +200,7 @@ func RemoveDuplicatePods(
// It's assumed all duplicated pods are in the same priority class
// TODO(jchaloup): check if the pod has a different node to lend to
for _, pod := range pods[upperAvg-1:] {
-	podEvictor.EvictPod(ctx, pod)
+	podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{})
	if podEvictor.NodeLimitExceeded(nodeMap[nodeName]) {
		continue loop
	}
@@ -75,7 +75,7 @@ func RemoveFailedPods(
	continue
}

-podEvictor.EvictPod(ctx, pods[i])
+podEvictor.EvictPod(ctx, pods[i], evictions.EvictOptions{})
if podEvictor.NodeLimitExceeded(node) {
	continue
}
@@ -24,7 +24,6 @@ import (
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
@@ -94,7 +93,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
for _, pod := range pods {
	if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
		klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
-		podEvictor.EvictPod(ctx, pod)
+		podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{})
		if podEvictor.NodeLimitExceeded(node) {
			continue loop
		}
@@ -107,7 +107,7 @@ loop:
	taintFilterFnc,
) {
	klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
-	podEvictor.EvictPod(ctx, pods[i])
+	podEvictor.EvictPod(ctx, pods[i], evictions.EvictOptions{})
	if podEvictor.NodeLimitExceeded(node) {
		continue loop
	}
@@ -313,7 +313,7 @@ func evictPods(
	continue
}

-if podEvictor.EvictPod(ctx, pod) {
+if podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{}) {
	klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))

	for name := range totalAvailableUsage {
@@ -87,7 +87,7 @@ loop:
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
	if checkPodsWithAntiAffinityExist(pods[i], pods) && evictorFilter.Filter(pods[i]) {
-		if podEvictor.EvictPod(ctx, pods[i]) {
+		if podEvictor.EvictPod(ctx, pods[i], evictions.EvictOptions{}) {
			// Since the current pod is evicted all other pods which have anti-affinity with this
			// pod need not be evicted.
			// Update pods.
@@ -131,7 +131,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
if nodeLimitExceeded[pod.Spec.NodeName] {
	continue
}
-if podEvictor.EvictPod(ctx, pod) {
+if podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{}) {
	klog.V(1).InfoS("Evicted pod because it exceeded its lifetime", "pod", klog.KObj(pod), "maxPodLifeTime", *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
}
if podEvictor.NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
@@ -90,7 +90,7 @@ loop:
} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
	continue
}
-podEvictor.EvictPod(ctx, pods[i])
+podEvictor.EvictPod(ctx, pods[i], evictions.EvictOptions{})
if podEvictor.NodeLimitExceeded(node) {
	continue loop
}
@@ -191,7 +191,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
if !isEvictable(pod) {
	continue
}
-podEvictor.EvictPod(ctx, pod)
+podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{})
if podEvictor.NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
	nodeLimitExceeded[pod.Spec.NodeName] = true
}
@@ -240,7 +240,7 @@ func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.Topol
// whichever number is less.
//
// (Note, we will only move as many pods from a domain as possible without bringing it below the ideal average,
// and we will not bring any smaller domain above the average)
// If the diff is within the skew, we move to the next highest domain.
// If the higher domain can't give any more without falling below the average, we move to the next lowest "high" domain
//
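As a concrete reading of the comment block above, with invented numbers: two domains holding 5 and 1 pods have an ideal average of 3, so at most 2 pods may leave the larger domain without dropping it below the average, and the smaller domain can absorb exactly 2 without rising above it; if the difference were already within the configured skew, nothing would move. A minimal sketch of that arithmetic:

```go
package main

import "fmt"

func main() {
	// Hypothetical domain sizes; the ideal average is their mean.
	high, low := 5, 1
	avg := (high + low) / 2 // 3
	maxSkew := 1

	if high-low <= maxSkew {
		fmt.Println("diff within skew, nothing to move")
		return
	}
	// Move only as many pods as the high domain can give without
	// falling below the average, capped by what the low domain can
	// take without rising above it (mirrors the comment above).
	give := high - avg // 2
	take := avg - low  // 2
	moved := give
	if take < moved {
		moved = take
	}
	fmt.Printf("move %d pod(s): %d/%d -> %d/%d\n", moved, high, low, high-moved, low+moved)
}
```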