Small refactor to accommodate descheduling only evictable pods
Signed-off-by: ravisantoshgudimetla <ravisantoshgudimetla@gmail.com>

@@ -27,6 +27,35 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/types"
 )
 
+// IsEvictable checks if a pod is evictable or not.
+func IsEvictable(pod *v1.Pod) bool {
+	sr, err := CreatorRef(pod)
+	if err != nil {
+		sr = nil
+	}
+	if IsMirrorPod(pod) || IsPodWithLocalStorage(pod) || sr == nil || IsDaemonsetPod(sr) || IsCriticalPod(pod) {
+		return false
+	}
+	return true
+}
+
+// ListEvictablePodsOnNode returns the list of evictable pods on node.
+func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
+	pods, err := ListPodsOnANode(client, node)
+	if err != nil {
+		return []*v1.Pod{}, err
+	}
+	evictablePods := make([]*v1.Pod, 0)
+	for _, pod := range pods {
+		if !IsEvictable(pod) {
+			continue
+		} else {
+			evictablePods = append(evictablePods, pod)
+		}
+	}
+	return evictablePods, nil
+}
+
 func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
 	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(
 		metav1.ListOptions{FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}).String()})
@@ -38,7 +67,6 @@ func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, erro
 	for i := range podList.Items {
 		pods = append(pods, &podList.Items[i])
 	}
-
 
 	return pods, nil
 }
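
Taken together, the two helpers above give every strategy one shared eviction filter instead of each re-implementing the mirror-pod, daemonset, critical-pod, and local-storage checks. A minimal sketch of how a strategy would consume them; the surrounding loop and variable names are illustrative, not part of this commit:

	// Illustrative only: iterate the evictable pods on each node.
	for _, node := range nodes {
		pods, err := podutil.ListEvictablePodsOnNode(client, node)
		if err != nil {
			continue // a real strategy would log or abort here
		}
		for _, pod := range pods {
			// Every pod here has passed IsEvictable: it is not a mirror,
			// daemonset, or critical pod, has no local storage, and has
			// a creator reference.
			_ = pod
		}
	}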

@@ -70,7 +70,7 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string,
 
 // ListDuplicatePodsOnANode lists duplicate pods on a given node.
 func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) DuplicatePodsMap {
-	pods, err := podutil.ListPodsOnANode(client, node)
+	pods, err := podutil.ListEvictablePodsOnNode(client, node)
 	if err != nil {
 		return nil
 	}
@@ -81,13 +81,9 @@ func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) Duplica
 func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
 	dpm := DuplicatePodsMap{}
 	for _, pod := range pods {
-		sr, err := podutil.CreatorRef(pod)
-		if err != nil || sr == nil {
-			continue
-		}
-		if podutil.IsMirrorPod(pod) || podutil.IsDaemonsetPod(sr) || podutil.IsPodWithLocalStorage(pod) || podutil.IsCriticalPod(pod) {
-			continue
-		}
+		// Ignoring the error here as in the ListDuplicatePodsOnNode function we call ListEvictablePodsOnNode
+		// which checks for error.
+		sr, _ := podutil.CreatorRef(pod)
 		s := strings.Join([]string{sr.Reference.Kind, sr.Reference.Namespace, sr.Reference.Name}, "/")
 		dpm[s] = append(dpm[s], pod)
 	}
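
Because the input to FindDuplicatePods now comes from ListEvictablePodsOnNode, every pod is guaranteed a non-nil creator reference, which is why the error and nil checks could be dropped. A self-contained sketch of the grouping scheme; the function name is hypothetical:

	// groupByCreator sketches how DuplicatePodsMap buckets pods: pods
	// sharing a creator land under one key such as
	// "ReplicaSet/default/nginx", so any bucket with more than one
	// entry holds duplicates.
	func groupByCreator(pods []*v1.Pod) map[string][]*v1.Pod {
		groups := map[string][]*v1.Pod{}
		for _, pod := range pods {
			sr, err := podutil.CreatorRef(pod)
			if err != nil || sr == nil {
				continue // defensive; evictable pods always carry a creator
			}
			key := strings.Join([]string{sr.Reference.Kind, sr.Reference.Namespace, sr.Reference.Name}, "/")
			groups[key] = append(groups[key], pod)
		}
		return groups
	}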

@@ -284,12 +284,8 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*
 	gPods := []*v1.Pod{}
 	totalReqs := map[v1.ResourceName]resource.Quantity{}
 	for _, pod := range pods {
-		sr, err := podutil.CreatorRef(pod)
-		if err != nil {
-			sr = nil
-		}
-
-		if podutil.IsMirrorPod(pod) || podutil.IsPodWithLocalStorage(pod) || sr == nil || podutil.IsDaemonsetPod(sr) || podutil.IsCriticalPod(pod) {
+		// We need to compute the usage of nonRemovablePods unless it is a best effort pod. So, cannot use podutil.ListEvictablePodsOnNode
+		if !podutil.IsEvictable(pod) {
 			nonRemovablePods = append(nonRemovablePods, pod)
 			if podutil.IsBestEffortPod(pod) {
 				continue
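
NodeUtilization cannot simply switch to ListEvictablePodsOnNode because non-evictable pods still consume node resources and must be counted, with one exception: a best-effort pod declares no requests, so it adds nothing to the tally and can skip it. A sketch of the per-pod accumulation this guards, assuming requests are summed across containers:

	// Illustrative: summing a pod's container requests into totalReqs.
	// For a best-effort pod every Requests map is empty, so the inner
	// loops never execute; that is why the `continue` above is safe.
	for _, container := range pod.Spec.Containers {
		for name, quantity := range container.Resources.Requests {
			total := totalReqs[name]
			total.Add(quantity)
			totalReqs[name] = total
		}
	}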

@@ -44,7 +44,7 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
 	podsEvicted := 0
 	for _, node := range nodes {
 		glog.V(1).Infof("Processing node: %#v\n", node.Name)
-		pods, err := podutil.ListPodsOnANode(client, node)
+		pods, err := podutil.ListEvictablePodsOnNode(client, node)
 		if err != nil {
 			return 0
 		}

@@ -19,7 +19,6 @@ package strategies
 import (
 	"testing"
 
-	"fmt"
 	"github.com/kubernetes-incubator/descheduler/test"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -34,6 +33,9 @@ func TestPodAntiAffinity(t *testing.T) {
 	p2 := test.BuildTestPod("p2", 100, 0, node.Name)
 	p3 := test.BuildTestPod("p3", 100, 0, node.Name)
 	p3.Labels = map[string]string{"foo": "bar"}
+	p1.Annotations = test.GetNormalPodAnnotation()
+	p2.Annotations = test.GetNormalPodAnnotation()
+	p3.Annotations = test.GetNormalPodAnnotation()
 	p1.Spec.Affinity = &v1.Affinity{
 		PodAntiAffinity: &v1.PodAntiAffinity{
 			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -80,7 +82,6 @@ func TestPodAntiAffinity(t *testing.T) {
 	expectedEvictedPodCount := 1
 	podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false)
 	if podsEvicted != expectedEvictedPodCount {
-		fmt.Println(podsEvicted)
 		t.Errorf("Unexpected no of pods evicted")
 	}
 }
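
The annotation lines are what keep this test passing after the refactor: removePodsWithAffinityRules now filters through IsEvictable, which rejects any pod without a creator reference. Assuming test.GetNormalPodAnnotation attaches a created-by reference (inferred from its use here), the mechanics are roughly:

	// Illustrative: a bare test pod has no creator annotation, so
	// podutil.CreatorRef finds nothing and IsEvictable returns false;
	// the pod never reaches the anti-affinity check.
	p := test.BuildTestPod("p4", 100, 0, node.Name)
	// With the annotation, CreatorRef resolves and the pod becomes a
	// candidate for eviction.
	p.Annotations = test.GetNormalPodAnnotation()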