Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)
PodEvictor: add a new param thresholdPriority to IsEvictable
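In short: IsEvictable gains an explicit thresholdPriority parameter, and a pod is treated as non-evictable once its spec priority reaches that threshold (pods at or above system-critical priority stay protected regardless of the threshold). Every call site updated in this commit passes utils.SystemCriticalPriority, which preserves the previous behavior. A minimal sketch of the new call pattern, assuming a podEvictor and pod are already in scope:

	// before this commit: podEvictor.IsEvictable(pod)
	evictable := podEvictor.IsEvictable(pod, utils.SystemCriticalPriority)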
@@ -78,12 +78,16 @@ func NewPodEvictor(
 }
 
 // IsEvictable checks if a pod is evictable or not.
-func (pe *PodEvictor) IsEvictable(pod *v1.Pod) bool {
+func (pe *PodEvictor) IsEvictable(pod *v1.Pod, thresholdPriority int32) bool {
 	checkErrs := []error{}
 	if IsCriticalPod(pod) {
 		checkErrs = append(checkErrs, fmt.Errorf("pod is critical"))
 	}
 
+	if !IsPodEvictableBasedOnPriority(pod, thresholdPriority) {
+		checkErrs = append(checkErrs, fmt.Errorf("pod is not evictable due to its priority"))
+	}
+
 	ownerRefList := podutil.OwnerRef(pod)
 	if IsDaemonsetPod(ownerRefList) {
 		checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
@@ -216,3 +220,8 @@ func IsPodWithLocalStorage(pod *v1.Pod) bool {
 
 	return false
 }
+
+// IsPodEvictableBasedOnPriority checks if the given pod is evictable based on priority resolved from pod Spec.
+func IsPodEvictableBasedOnPriority(pod *v1.Pod, priority int32) bool {
+	return pod.Spec.Priority == nil || (*pod.Spec.Priority < priority && *pod.Spec.Priority < utils.SystemCriticalPriority)
+}
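For reference, the new predicate treats a pod as evictable only when its spec priority is unset, or strictly below both the caller's threshold and the system-critical level. A self-contained sketch of that logic; systemCriticalPriority is a stand-in for utils.SystemCriticalPriority (2000000000 in Kubernetes), and evictableByPriority restates the function above for a bare *int32 so the sketch runs without client-go or the descheduler module:

package main

import "fmt"

// Stand-in for utils.SystemCriticalPriority.
const systemCriticalPriority int32 = 2000000000

// evictableByPriority mirrors IsPodEvictableBasedOnPriority: evictable when
// the priority is unset, or strictly below both the threshold and the
// system-critical level.
func evictableByPriority(podPriority *int32, threshold int32) bool {
	return podPriority == nil ||
		(*podPriority < threshold && *podPriority < systemCriticalPriority)
}

func main() {
	lowPriority, highPriority := int32(800), int32(900)

	fmt.Println(evictableByPriority(nil, lowPriority))           // true: no priority set
	fmt.Println(evictableByPriority(&highPriority, lowPriority)) // false: 900 is not below the 800 threshold (cf. test case p14)
	fmt.Println(evictableByPriority(&lowPriority, highPriority)) // true: 800 is below 900 and below system-critical
}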
@@ -71,10 +71,13 @@ func TestEvictPod(t *testing.T) {
 
 func TestIsEvictable(t *testing.T) {
 	n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
+	lowPriority := int32(800)
+	highPriority := int32(900)
 	type testCase struct {
 		pod                   *v1.Pod
 		runBefore             func(*v1.Pod)
 		evictLocalStoragePods bool
+		priorityThreshold     *int32
 		result                bool
 	}
 
@@ -179,6 +182,7 @@ func TestIsEvictable(t *testing.T) {
 	}, {
 		pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
 		runBefore: func(pod *v1.Pod) {
+			pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 			pod.Annotations = test.GetMirrorPodAnnotation()
 		},
 		evictLocalStoragePods: false,
@@ -186,6 +190,7 @@ func TestIsEvictable(t *testing.T) {
 	}, {
 		pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
 		runBefore: func(pod *v1.Pod) {
+			pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 			pod.Annotations = test.GetMirrorPodAnnotation()
 			pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
 		},
@@ -194,6 +199,7 @@ func TestIsEvictable(t *testing.T) {
 	}, {
 		pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
 		runBefore: func(pod *v1.Pod) {
+			pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 			priority := utils.SystemCriticalPriority
 			pod.Spec.Priority = &priority
 		},
@@ -202,6 +208,7 @@ func TestIsEvictable(t *testing.T) {
 	}, {
 		pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
 		runBefore: func(pod *v1.Pod) {
+			pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
 			priority := utils.SystemCriticalPriority
 			pod.Spec.Priority = &priority
 			pod.Annotations = map[string]string{
@@ -210,6 +217,25 @@ func TestIsEvictable(t *testing.T) {
 		},
 		evictLocalStoragePods: false,
 		result:                true,
+	}, {
+		pod: test.BuildTestPod("p14", 400, 0, n1.Name, nil),
+		runBefore: func(pod *v1.Pod) {
+			pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+			pod.Spec.Priority = &highPriority
+		},
+		evictLocalStoragePods: false,
+		priorityThreshold:     &lowPriority,
+		result:                false,
+	}, {
+		pod: test.BuildTestPod("p15", 400, 0, n1.Name, nil),
+		runBefore: func(pod *v1.Pod) {
+			pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+			pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
+			pod.Spec.Priority = &highPriority
+		},
+		evictLocalStoragePods: false,
+		priorityThreshold:     &lowPriority,
+		result:                true,
 	},
 	}
 
@@ -218,7 +244,11 @@ func TestIsEvictable(t *testing.T) {
 		podEvictor := &PodEvictor{
 			evictLocalStoragePods: test.evictLocalStoragePods,
 		}
-		result := podEvictor.IsEvictable(test.pod)
+		testPriorityThreshold := utils.SystemCriticalPriority
+		if test.priorityThreshold != nil {
+			testPriorityThreshold = *test.priorityThreshold
+		}
+		result := podEvictor.IsEvictable(test.pod, testPriorityThreshold)
 		if result != test.result {
 			t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pod.Name, test.result, result)
 		}
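The loop above defaults a nil per-case threshold to utils.SystemCriticalPriority, so cases that predate this commit keep their old meaning and only p14 and p15 opt into a tighter threshold. The same resolve-or-default shape, sketched as a hypothetical standalone helper (not part of the commit):

// resolveThreshold mirrors the defaulting in the test loop: a nil
// configured threshold falls back to the system-critical level,
// preserving the pre-commit behavior of IsEvictable.
func resolveThreshold(configured *int32) int32 {
	const systemCriticalPriority int32 = 2000000000 // stand-in for utils.SystemCriticalPriority
	if configured == nil {
		return systemCriticalPriority
	}
	return *configured
}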
@@ -233,10 +263,6 @@ func TestPodTypes(t *testing.T) {
 	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, nil)
 	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, nil)
 	p4 := test.BuildTestPod("p4", 400, 0, n1.Name, nil)
 	p5 := test.BuildTestPod("p5", 400, 0, n1.Name, nil)
-	p6 := test.BuildTestPod("p6", 400, 0, n1.Name, nil)
-
-	p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-
 	p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
 	// The following 4 pods won't get evicted.
@@ -257,18 +283,9 @@ func TestPodTypes(t *testing.T) {
 	}
 	// A Mirror Pod.
 	p4.Annotations = test.GetMirrorPodAnnotation()
 	// A Critical Pod.
 	p5.Namespace = "kube-system"
-	priority := utils.SystemCriticalPriority
-	p5.Spec.Priority = &priority
+	systemCriticalPriority := utils.SystemCriticalPriority
+	p5.Spec.Priority = &systemCriticalPriority
 	if !IsMirrorPod(p4) {
 		t.Errorf("Expected p4 to be a mirror pod.")
 	}
 	if !IsCriticalPod(p5) {
 		t.Errorf("Expected p5 to be a critical pod.")
 	}
 	if !IsPodWithLocalStorage(p3) {
 		t.Errorf("Expected p3 to be a pod with local storage.")
 	}
 
@@ -31,6 +31,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 // RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
@@ -46,7 +47,9 @@ func RemoveDuplicatePods(
 ) {
 	for _, node := range nodes {
 		klog.V(1).Infof("Processing node: %#v", node.Name)
-		pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable))
+		pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(func(pod *v1.Pod) bool {
+			return podEvictor.IsEvictable(pod, utils.SystemCriticalPriority)
+		}))
 		if err != nil {
 			klog.Errorf("error listing evictable pods on node %s: %+v", node.Name, err)
 			continue
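Because IsEvictable now takes two arguments, it no longer matches the one-argument func(*v1.Pod) bool shape that podutil.WithFilter expects, so this and every strategy below wrap it in a closure that captures the threshold. The adapter could also be factored out; a sketch under the assumption that the descheduler module at this commit is on the import path (makeEvictableFilter and defaultEvictableFilter are hypothetical helpers, not part of the commit):

package strategies

import (
	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/pkg/utils"
)

// makeEvictableFilter adapts the two-argument IsEvictable to the
// one-argument filter shape by capturing the threshold in a closure.
func makeEvictableFilter(podEvictor *evictions.PodEvictor, threshold int32) func(*v1.Pod) bool {
	return func(pod *v1.Pod) bool {
		return podEvictor.IsEvictable(pod, threshold)
	}
}

// defaultEvictableFilter matches the call sites in this commit, which
// all pass utils.SystemCriticalPriority.
func defaultEvictableFilter(podEvictor *evictions.PodEvictor) func(*v1.Pod) bool {
	return makeEvictableFilter(podEvictor, utils.SystemCriticalPriority)
}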
@@ -397,7 +397,7 @@ func classifyPods(pods []*v1.Pod, evictor *evictions.PodEvictor) ([]*v1.Pod, []*
 	var nonRemovablePods, removablePods []*v1.Pod
 
 	for _, pod := range pods {
-		if !evictor.IsEvictable(pod) {
+		if !evictor.IsEvictable(pod, utils.SystemCriticalPriority) {
 			nonRemovablePods = append(nonRemovablePods, pod)
 		} else {
 			removablePods = append(removablePods, pod)
@@ -28,6 +28,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
@@ -61,7 +62,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 				client,
 				node,
 				podutil.WithFilter(func(pod *v1.Pod) bool {
-					return podEvictor.IsEvictable(pod) &&
+					return podEvictor.IsEvictable(pod, utils.SystemCriticalPriority) &&
 						!nodeutil.PodFitsCurrentNode(pod, node) &&
 						nodeutil.PodFitsAnyNode(pod, nodes)
 				}),
@@ -60,7 +60,9 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 			ctx,
 			client,
 			node,
-			podutil.WithFilter(podEvictor.IsEvictable),
+			podutil.WithFilter(func(pod *v1.Pod) bool {
+				return podEvictor.IsEvictable(pod, utils.SystemCriticalPriority)
+			}),
 			podutil.WithNamespaces(namespaces.Include),
 			podutil.WithoutNamespaces(namespaces.Exclude),
 		)
@@ -57,7 +57,9 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 			ctx,
 			client,
 			node,
-			podutil.WithFilter(podEvictor.IsEvictable),
+			podutil.WithFilter(func(pod *v1.Pod) bool {
+				return podEvictor.IsEvictable(pod, utils.SystemCriticalPriority)
+			}),
 			podutil.WithNamespaces(namespaces.Include),
 			podutil.WithoutNamespaces(namespaces.Exclude),
 		)
@@ -28,6 +28,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 func validatePodLifeTimeParams(params *api.StrategyParameters) error {
@@ -69,12 +70,14 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 		}
 	}
 
-func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, params *api.StrategyParameters, evictor *evictions.PodEvictor) []*v1.Pod {
+func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, params *api.StrategyParameters, podEvictor *evictions.PodEvictor) []*v1.Pod {
 	pods, err := podutil.ListPodsOnANode(
 		ctx,
 		client,
 		node,
-		podutil.WithFilter(evictor.IsEvictable),
+		podutil.WithFilter(func(pod *v1.Pod) bool {
+			return podEvictor.IsEvictable(pod, utils.SystemCriticalPriority)
+		}),
 		podutil.WithNamespaces(params.Namespaces.Include),
 		podutil.WithoutNamespaces(params.Namespaces.Exclude),
 	)
@@ -27,6 +27,7 @@ import (
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
@@ -56,7 +57,9 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 			ctx,
 			client,
 			node,
-			podutil.WithFilter(podEvictor.IsEvictable),
+			podutil.WithFilter(func(pod *v1.Pod) bool {
+				return podEvictor.IsEvictable(pod, utils.SystemCriticalPriority)
+			}),
 			podutil.WithNamespaces(strategy.Params.Namespaces.Include),
 			podutil.WithoutNamespaces(strategy.Params.Namespaces.Exclude),
 		)
@@ -104,7 +104,7 @@ func GetPodSource(pod *v1.Pod) (string, error) {
 	return "", fmt.Errorf("cannot get source of pod %q", pod.UID)
 }
 
-// IsCriticalPod returns true if pod's priority is greater than or equal to SystemCriticalPriority.
+// IsCriticalPod returns true if the pod is a static or mirror pod.
 func IsCriticalPod(pod *v1.Pod) bool {
 	if IsStaticPod(pod) {
 		return true
@@ -112,17 +112,9 @@ func IsCriticalPod(pod *v1.Pod) bool {
 	if IsMirrorPod(pod) {
 		return true
 	}
-	if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
-		return true
-	}
 	return false
 }
 
-// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
-func IsCriticalPodBasedOnPriority(priority int32) bool {
-	return priority >= SystemCriticalPriority
-}
-
 // PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
 // containers of the pod. If PodOverhead feature is enabled, pod overhead is added to the
 // total container resource requests and to the total container limits which have a
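Note the semantic shift in the last two hunks: IsCriticalPod no longer treats high priority as critical; that protection now lives in IsPodEvictableBasedOnPriority, which never lets IsEvictable evict a pod at or above utils.SystemCriticalPriority. A small self-contained sketch of the before/after decision, with booleans standing in for the static and mirror checks:

package main

import "fmt"

// Stand-in for utils.SystemCriticalPriority.
const systemCriticalPriority int32 = 2000000000

// isCriticalOld: static, mirror, or priority >= system-critical (pre-commit).
func isCriticalOld(static, mirror bool, priority *int32) bool {
	return static || mirror || (priority != nil && *priority >= systemCriticalPriority)
}

// isCriticalNew: only static or mirror pods (post-commit).
func isCriticalNew(static, mirror bool) bool {
	return static || mirror
}

func main() {
	p := systemCriticalPriority
	fmt.Println(isCriticalOld(false, false, &p)) // true
	fmt.Println(isCriticalNew(false, false))     // false: the priority check in IsEvictable protects this pod instead
}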