Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-25 20:59:28 +01:00)
Drop node parameter from EvictPod
The method uses the node object only to get the node name, which can be retrieved from the pod object instead. Some strategies may try to evict a pod in the Pending state, which does not have .spec.nodeName set; in that case the per-node eviction limit check is skipped.
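For orientation before the diff: a standalone sketch (not part of the commit) of where the node name now comes from. The pod values here are illustrative; only the EvictPod signatures quoted in the comments come from the diff below.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "nginx", Namespace: "default"},
		// Pending pods have an empty NodeName; scheduled pods carry their node here.
		Spec: v1.PodSpec{NodeName: "node-1"},
	}
	// Before: EvictPod(ctx, pod, node) — the node was used only for node.Name.
	// After:  EvictPod(ctx, pod) — the name is read from pod.Spec.NodeName.
	fmt.Println("node used for eviction accounting:", pod.Spec.NodeName)
}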
@@ -44,7 +44,7 @@ const (
 )
 
 // nodePodEvictedCount keeps count of pods evicted on node
-type nodePodEvictedCount map[*v1.Node]uint
+type nodePodEvictedCount map[string]uint
 type namespacePodEvictCount map[string]uint
 
 type PodEvictor struct {
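A likely side benefit of keying nodePodEvictedCount by name rather than by *v1.Node (my reading; the commit message does not spell this out): pointer keys rely on object identity, so two distinct objects describing the same node would not share a counter. A minimal standalone illustration:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	a := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
	b := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}} // same node, different object

	byPtr := map[*v1.Node]uint{a: 3}
	byName := map[string]uint{a.Name: 3}

	fmt.Println(byPtr[b])       // 0 — pointer identity misses the entry
	fmt.Println(byName[b.Name]) // 3 — name keys are stable across lookups
}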
@@ -72,7 +72,7 @@ func NewPodEvictor(
 	var namespacePodCount = make(namespacePodEvictCount)
 	for _, node := range nodes {
 		// Initialize podsEvicted till now with 0.
-		nodePodCount[node] = 0
+		nodePodCount[node.Name] = 0
 	}
 
 	return &PodEvictor{
@@ -90,7 +90,7 @@ func NewPodEvictor(
 
 // NodeEvicted gives a number of pods evicted for node
 func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
-	return pe.nodepodCount[node]
+	return pe.nodepodCount[node.Name]
 }
 
 // TotalEvicted gives a number of pods evicted through all nodes
@@ -106,7 +106,7 @@ func (pe *PodEvictor) TotalEvicted() uint {
 // possible (due to maxPodsToEvictPerNode constraint). Success is true when the pod
 // is evicted on the server side.
 // eviction reason can be set through the ctx's evictionReason:STRING pair
-func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node) (bool, error) {
+func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
 	strategy := ""
 	if ctx.Value("strategyName") != nil {
 		strategy = ctx.Value("strategyName").(string)
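The strategyName read above (and the evictionReason read in the next hunk's context) use plain string context keys. A standalone sketch of how a caller would populate them; the strategy name and reason here are illustrative:

package main

import (
	"context"
	"fmt"
)

func main() {
	// EvictPod reads these with ctx.Value(...).(string); plain string keys
	// mirror the diff (go vet prefers unexported key types, but this matches
	// the code as written).
	ctx := context.WithValue(context.Background(), "strategyName", "PodLifeTime")
	ctx = context.WithValue(ctx, "evictionReason", "pod exceeded its lifetime")

	if v := ctx.Value("strategyName"); v != nil {
		fmt.Println("strategy:", v.(string))
	}
	if v := ctx.Value("evictionReason"); v != nil {
		fmt.Println("reason:", v.(string))
	}
}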
@@ -116,16 +116,18 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node)
 		reason = ctx.Value("evictionReason").(string)
 	}
 
-	if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[node]+1 > *pe.maxPodsToEvictPerNode {
-		if pe.metricsEnabled {
-			metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+	if pod.Spec.NodeName != "" {
+		if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
+			if pe.metricsEnabled {
+				metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
+			}
+			return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", *pe.maxPodsToEvictPerNode, pod.Spec.NodeName)
 		}
-		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", *pe.maxPodsToEvictPerNode, node.Name)
 	}
 
 	if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
 		if pe.metricsEnabled {
-			metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+			metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
 		}
 		return false, fmt.Errorf("Maximum number %v of evicted pods per %q namespace reached", *pe.maxPodsToEvictPerNamespace, pod.Namespace)
 	}
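The new pod.Spec.NodeName != "" guard above is what lets Pending pods bypass the per-node cap while the per-namespace cap still applies. A minimal standalone sketch of the guard's logic (the helper name is hypothetical):

package main

import "fmt"

// nodeCapExceeded mirrors the new guard: the per-node cap is only consulted
// for pods bound to a node; a Pending pod (empty nodeName) skips the check.
func nodeCapExceeded(nodeName string, evicted map[string]uint, maxPerNode *uint) bool {
	if nodeName == "" || maxPerNode == nil {
		return false
	}
	return evicted[nodeName]+1 > *maxPerNode
}

func main() {
	max := uint(1)
	evicted := map[string]uint{"node-1": 1}
	fmt.Println(nodeCapExceeded("node-1", evicted, &max)) // true — cap reached
	fmt.Println(nodeCapExceeded("", evicted, &max))       // false — Pending pod, check skipped
}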
@@ -135,22 +137,24 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node)
 		// err is used only for logging purposes
 		klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
 		if pe.metricsEnabled {
-			metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+			metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
 		}
 		return false, nil
 	}
 
-	pe.nodepodCount[node]++
+	if pod.Spec.NodeName != "" {
+		pe.nodepodCount[pod.Spec.NodeName]++
+	}
 	pe.namespacePodCount[pod.Namespace]++
 
 	if pe.metricsEnabled {
-		metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
+		metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
 	}
 
 	if pe.dryRun {
-		klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", node.Name)
+		klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", pod.Spec.NodeName)
 	} else {
-		klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", node.Name)
+		klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", pod.Spec.NodeName)
 		eventBroadcaster := record.NewBroadcaster()
 		eventBroadcaster.StartStructuredLogging(3)
 		eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
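The remaining hunks update every strategy call site to the two-argument form. For context on the non-dry-run branch above, a standalone sketch of the event-broadcaster wiring, using a fake clientset and namespace that are illustrative only:

package main

import (
	"k8s.io/client-go/kubernetes/fake"
	clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	client := fake.NewSimpleClientset()

	// Mirrors the wiring in EvictPod's non-dry-run branch: eviction events
	// are logged structurally and recorded into the pod's namespace.
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartStructuredLogging(3)
	eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: client.CoreV1().Events("default")})
	defer eventBroadcaster.Shutdown()
}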
@@ -199,7 +199,7 @@ func RemoveDuplicatePods(
 			// It's assumed all duplicated pods are in the same priority class
 			// TODO(jchaloup): check if the pod has a different node to lend to
 			for _, pod := range pods[upperAvg-1:] {
-				if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[nodeName]); err != nil {
+				if _, err := podEvictor.EvictPod(ctx, pod); err != nil {
 					klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 					break
 				}
@@ -75,7 +75,7 @@ func RemoveFailedPods(
 			continue
 		}
 
-		if _, err = podEvictor.EvictPod(ctx, pods[i], node); err != nil {
+		if _, err = podEvictor.EvictPod(ctx, pods[i]); err != nil {
 			klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 			break
 		}
@@ -93,7 +93,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 			for _, pod := range pods {
 				if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
 					klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
-					if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
+					if _, err := podEvictor.EvictPod(ctx, pod); err != nil {
 						klog.ErrorS(err, "Error evicting pod")
 						break
 					}
@@ -106,7 +106,7 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 				taintFilterFnc,
 			) {
 				klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
-				if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
+				if _, err := podEvictor.EvictPod(ctx, pods[i]); err != nil {
 					klog.ErrorS(err, "Error evicting pod")
 					break
 				}
@@ -313,7 +313,7 @@ func evictPods(
 				continue
 			}
 
-			success, err := podEvictor.EvictPod(ctx, pod, nodeInfo.node)
+			success, err := podEvictor.EvictPod(ctx, pod)
 			if err != nil {
 				klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 				break
@@ -86,7 +86,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
 			if checkPodsWithAntiAffinityExist(pods[i], pods) && evictorFilter.Filter(pods[i]) {
-				success, err := podEvictor.EvictPod(ctx, pods[i], node)
+				success, err := podEvictor.EvictPod(ctx, pods[i])
 				if err != nil {
 					klog.ErrorS(err, "Error evicting pod")
 					break
@@ -107,7 +107,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 	podutil.SortPodsBasedOnAge(podsToEvict)
 
 	for _, pod := range podsToEvict {
-		success, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName])
+		success, err := podEvictor.EvictPod(ctx, pod)
 		if success {
 			klog.V(1).InfoS("Evicted pod because it exceeded its lifetime", "pod", klog.KObj(pod), "maxPodLifeTime", *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
 		}
@@ -89,7 +89,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 		} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
 			continue
 		}
-		if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
+		if _, err := podEvictor.EvictPod(ctx, pods[i]); err != nil {
 			klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 			break
 		}
@@ -187,7 +187,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
 		if !isEvictable(pod) {
 			continue
 		}
-		if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName]); err != nil {
+		if _, err := podEvictor.EvictPod(ctx, pod); err != nil {
 			klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 			break
 		}