Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)
* feat: Implement preferredDuringSchedulingIgnoredDuringExecution for RemovePodsViolatingNodeAffinity

  The descheduler can now detect and evict pods that are not optimally allocated according to their "preferred..." node affinity. It only evicts a pod if the pod can be scheduled on a node that scores higher, in terms of preferred node affinity, than its current one. This behavior is activated by enabling the RemovePodsViolatingNodeAffinity plugin and passing "preferredDuringSchedulingIgnoredDuringExecution" in the args.

  For example, imagine a pod that prefers nodes with the label "key1: value1" with a weight of 10. If this pod is scheduled on a node that does not have the "key1: value1" label, but there is another node that has this label and where the pod can potentially run, then the descheduler will evict the pod (see the sketch just below).

  Another effect of this commit is that the RemovePodsViolatingNodeAffinity plugin no longer removes pods that do not fit on their current node for reasons other than violating the node affinity. Previously, enabling this plugin could cause evictions of pods that were running on tainted nodes without the necessary tolerations.

  This commit also fixes the wording of some tests in node_affinity_test.go, as well as some incorrect parameters and expectations in those tests.

* Optimization on RemovePodsViolatingNodeAffinity

  Before checking whether a pod can be evicted or scheduled somewhere else, we first check whether it has the corresponding nodeAffinity field defined; if not, the pod is immediately discarded as a candidate. Apart from that, the method that calculates the weight that a pod gives to a node based on its preferred node affinity has been renamed to better reflect what it does.
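A minimal, illustrative Go sketch of that example: a pod whose preferred node affinity asks for "key1: value1" with a weight of 10, plus plugin args that enable the new behavior. The pod name and the plugin's import path are assumptions made for illustration; the NodeAffinityType field is the one read by the plugin code further down.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
)

func main() {
	// A pod that prefers nodes labeled "key1: value1" with a weight of 10.
	// The pod name is hypothetical.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pod"},
		Spec: v1.PodSpec{
			Affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
						{
							Weight: 10,
							Preference: v1.NodeSelectorTerm{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{Key: "key1", Operator: v1.NodeSelectorOpIn, Values: []string{"value1"}},
								},
							},
						},
					},
				},
			},
		},
	}

	// Plugin args enabling the new preferred-affinity handling; NodeAffinityType
	// is the field the plugin's Deschedule method iterates over.
	args := &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
		NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
	}

	fmt.Println(pod.Name, args.NodeAffinityType)
}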
147 lines · 5.4 KiB · Go
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package removepodsviolatingnodeaffinity

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
	"sigs.k8s.io/descheduler/pkg/utils"
)

const PluginName = "RemovePodsViolatingNodeAffinity"

// RemovePodsViolatingNodeAffinity evicts pods on the node which violate node affinity
type RemovePodsViolatingNodeAffinity struct {
	handle    frameworktypes.Handle
	args      *RemovePodsViolatingNodeAffinityArgs
	podFilter podutil.FilterFunc
}

var _ frameworktypes.DeschedulePlugin = &RemovePodsViolatingNodeAffinity{}

// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
	nodeAffinityArgs, ok := args.(*RemovePodsViolatingNodeAffinityArgs)
	if !ok {
		return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeAffinityArgs, got %T", args)
	}

	var includedNamespaces, excludedNamespaces sets.Set[string]
	if nodeAffinityArgs.Namespaces != nil {
		includedNamespaces = sets.New(nodeAffinityArgs.Namespaces.Include...)
		excludedNamespaces = sets.New(nodeAffinityArgs.Namespaces.Exclude...)
	}

	// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
	podFilter, err := podutil.NewOptions().
		WithFilter(podutil.WrapFilterFuncs(handle.Evictor().Filter, handle.Evictor().PreEvictionFilter)).
		WithNamespaces(includedNamespaces).
		WithoutNamespaces(excludedNamespaces).
		WithLabelSelector(nodeAffinityArgs.LabelSelector).
		BuildFilterFunc()
	if err != nil {
		return nil, fmt.Errorf("error initializing pod filter function: %v", err)
	}

	return &RemovePodsViolatingNodeAffinity{
		handle:    handle,
		podFilter: podFilter,
		args:      nodeAffinityArgs,
	}, nil
}

// Name retrieves the plugin name
func (d *RemovePodsViolatingNodeAffinity) Name() string {
	return PluginName
}

func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
	for _, nodeAffinity := range d.args.NodeAffinityType {
		klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
		var err *frameworktypes.Status = nil

		// The pods that we'll evict must be evictable. For example, the current number of replicas
		// must be greater than the pdb.minValue.
		// The pods must be able to get scheduled on a different node. Otherwise, it doesn't make much
		// sense to evict them.
		switch nodeAffinity {
		case "requiredDuringSchedulingIgnoredDuringExecution":
			// In this specific case, the pod must also violate the nodeSelector to be evicted
			filterFunc := func(pod *v1.Pod, node *v1.Node, nodes []*v1.Node) bool {
				return utils.PodHasNodeAffinity(pod, utils.RequiredDuringSchedulingIgnoredDuringExecution) &&
					d.handle.Evictor().Filter(pod) &&
					nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) &&
					!nodeutil.PodMatchNodeSelector(pod, node)
			}
			err = d.processNodes(ctx, nodes, filterFunc)
		case "preferredDuringSchedulingIgnoredDuringExecution":
			// In this specific case, the pod must have a better fit on another node than
			// in the current one based on the preferred node affinity
			filterFunc := func(pod *v1.Pod, node *v1.Node, nodes []*v1.Node) bool {
				return utils.PodHasNodeAffinity(pod, utils.PreferredDuringSchedulingIgnoredDuringExecution) &&
					d.handle.Evictor().Filter(pod) &&
					nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) &&
					(nodeutil.GetBestNodeWeightGivenPodPreferredAffinity(pod, nodes) > nodeutil.GetNodeWeightGivenPodPreferredAffinity(pod, node))
			}
			err = d.processNodes(ctx, nodes, filterFunc)
		default:
			klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
		}

		if err != nil {
			return err
		}
	}
	return nil
}

func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, nodes []*v1.Node, filterFunc func(*v1.Pod, *v1.Node, []*v1.Node) bool) *frameworktypes.Status {
	for _, node := range nodes {
		klog.V(2).InfoS("Processing node", "node", klog.KObj(node))

		// Potentially evictable pods
		pods, err := podutil.ListPodsOnANode(
			node.Name,
			d.handle.GetPodsAssignedToNodeFunc(),
			podutil.WrapFilterFuncs(d.podFilter, func(pod *v1.Pod) bool {
				return filterFunc(pod, node, nodes)
			}),
		)
		if err != nil {
			return &frameworktypes.Status{
				Err: fmt.Errorf("error listing pods on a node: %v", err),
			}
		}

		for _, pod := range pods {
			klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
			d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
			if d.handle.Evictor().NodeLimitExceeded(node) {
				break
			}
		}
	}
	return nil
}
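For reference, a minimal sketch of the semantics behind the weight comparison in the "preferredDuringSchedulingIgnoredDuringExecution" case above. This is an illustrative reimplementation built on the k8s.io/component-helpers scoring helper, not the actual nodeutil code: a node's weight for a pod is the summed Weight of all preferred scheduling terms that match the node, and the plugin evicts only when the best weight across candidate nodes exceeds the current node's weight.

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
)

// nodeWeightForPod approximates what GetNodeWeightGivenPodPreferredAffinity
// computes: the sum of the Weight fields of every preferred scheduling term
// of the pod whose selector matches the given node.
func nodeWeightForPod(pod *v1.Pod, node *v1.Node) int64 {
	affinity := pod.Spec.Affinity
	if affinity == nil || affinity.NodeAffinity == nil {
		return 0
	}
	terms, err := nodeaffinity.NewPreferredSchedulingTerms(
		affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution)
	if err != nil {
		return 0
	}
	return terms.Score(node)
}

// bestNodeWeightForPod approximates GetBestNodeWeightGivenPodPreferredAffinity:
// the maximum weight the pod could obtain on any of the candidate nodes.
func bestNodeWeightForPod(pod *v1.Pod, nodes []*v1.Node) int64 {
	var best int64
	for _, node := range nodes {
		if w := nodeWeightForPod(pod, node); w > best {
			best = w
		}
	}
	return best
}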