
Small refactor to accommodate descheduling only evictable pods

Signed-off-by: ravisantoshgudimetla <ravisantoshgudimetla@gmail.com>
Author: ravisantoshgudimetla
Date:   2017-11-28 17:00:46 -05:00
parent afc17a62ea
commit a63f815116
5 changed files with 39 additions and 18 deletions

View File

@@ -27,6 +27,35 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/types"
 )
 
+// IsEvictable checks if a pod is evictable or not.
+func IsEvictable(pod *v1.Pod) bool {
+	sr, err := CreatorRef(pod)
+	if err != nil {
+		sr = nil
+	}
+	if IsMirrorPod(pod) || IsPodWithLocalStorage(pod) || sr == nil || IsDaemonsetPod(sr) || IsCriticalPod(pod) {
+		return false
+	}
+	return true
+}
+
+// ListEvictablePodsOnNode returns the list of evictable pods on node.
+func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
+	pods, err := ListPodsOnANode(client, node)
+	if err != nil {
+		return []*v1.Pod{}, err
+	}
+	evictablePods := make([]*v1.Pod, 0)
+	for _, pod := range pods {
+		if !IsEvictable(pod) {
+			continue
+		} else {
+			evictablePods = append(evictablePods, pod)
+		}
+	}
+	return evictablePods, nil
+}
+
 func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
 	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(
 		metav1.ListOptions{FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}).String()})
@@ -38,7 +67,6 @@ func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
 	for i := range podList.Items {
 		pods = append(pods, &podList.Items[i])
 	}
-
 	return pods, nil
 }
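
For orientation, a minimal sketch of how a strategy consumes the new helper after this change. The import paths, the strategies package placement, and the processNode wrapper are illustrative assumptions, not part of the commit, and compiling it requires the repo's vendored dependencies:

	package strategies

	import (
		"fmt"

		"k8s.io/api/core/v1"
		clientset "k8s.io/client-go/kubernetes"

		podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
	)

	// processNode is a hypothetical caller: ListEvictablePodsOnNode already drops
	// mirror pods, DaemonSet pods, pods with local storage, critical pods, and
	// pods without a creator reference, so strategies no longer repeat those
	// checks inline.
	func processNode(client clientset.Interface, node *v1.Node) error {
		pods, err := podutil.ListEvictablePodsOnNode(client, node)
		if err != nil {
			return err
		}
		for _, pod := range pods {
			fmt.Printf("eviction candidate: %s/%s\n", pod.Namespace, pod.Name)
		}
		return nil
	}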

View File

@@ -70,7 +70,7 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string,
 // ListDuplicatePodsOnANode lists duplicate pods on a given node.
 func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) DuplicatePodsMap {
-	pods, err := podutil.ListPodsOnANode(client, node)
+	pods, err := podutil.ListEvictablePodsOnNode(client, node)
 	if err != nil {
 		return nil
 	}
@@ -81,13 +81,9 @@ func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) DuplicatePodsMap {
 func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
 	dpm := DuplicatePodsMap{}
 	for _, pod := range pods {
-		sr, err := podutil.CreatorRef(pod)
-		if err != nil || sr == nil {
-			continue
-		}
-		if podutil.IsMirrorPod(pod) || podutil.IsDaemonsetPod(sr) || podutil.IsPodWithLocalStorage(pod) || podutil.IsCriticalPod(pod) {
-			continue
-		}
+		// Ignoring the error here as in the ListDuplicatePodsOnNode function we call ListEvictablePodsOnNode
+		// which checks for error.
+		sr, _ := podutil.CreatorRef(pod)
 		s := strings.Join([]string{sr.Reference.Kind, sr.Reference.Namespace, sr.Reference.Name}, "/")
 		dpm[s] = append(dpm[s], pod)
 	}
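
The surviving line above keys DuplicatePodsMap by joining the creator's Kind, Namespace, and Name. A self-contained sketch of that grouping; the creatorKey helper is illustrative, not part of the descheduler API:

	package main

	import (
		"fmt"
		"strings"
	)

	// creatorKey mirrors the strings.Join in FindDuplicatePods: pods whose
	// creator shares Kind, Namespace, and Name land in the same bucket.
	func creatorKey(kind, namespace, name string) string {
		return strings.Join([]string{kind, namespace, name}, "/")
	}

	func main() {
		dpm := map[string][]string{}
		// Two pods created by the same ReplicaSet collapse into one key.
		for _, podName := range []string{"nginx-abc12", "nginx-def34"} {
			k := creatorKey("ReplicaSet", "default", "nginx")
			dpm[k] = append(dpm[k], podName)
		}
		fmt.Println(dpm) // map[ReplicaSet/default/nginx:[nginx-abc12 nginx-def34]]
	}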

View File

@@ -284,12 +284,8 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*
 	gPods := []*v1.Pod{}
 	totalReqs := map[v1.ResourceName]resource.Quantity{}
 	for _, pod := range pods {
-		sr, err := podutil.CreatorRef(pod)
-		if err != nil {
-			sr = nil
-		}
-		if podutil.IsMirrorPod(pod) || podutil.IsPodWithLocalStorage(pod) || sr == nil || podutil.IsDaemonsetPod(sr) || podutil.IsCriticalPod(pod) {
+		// We need to compute the usage of nonRemovablePods unless it is a best effort pod. So, cannot use podutil.ListEvictablePodsOnNode
+		if !podutil.IsEvictable(pod) {
 			nonRemovablePods = append(nonRemovablePods, pod)
 			if podutil.IsBestEffortPod(pod) {
 				continue
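
A toy sketch of the accounting flow above, with simplified stand-in types (the real code sums v1.ResourceList requests): every non-evictable pod is recorded as non-removable, but a best-effort pod requests nothing, so the request accounting is skipped for it:

	package main

	import "fmt"

	type pod struct {
		name       string
		evictable  bool
		bestEffort bool
		cpuRequest int64 // millicores; zero for best-effort pods
	}

	func main() {
		pods := []pod{
			{"kube-proxy", false, false, 100}, // DaemonSet pod: non-removable, still counted
			{"scratch", false, true, 0},       // best-effort, non-removable: not counted
			{"web-1", true, false, 200},       // evictable: counted
		}
		var nonRemovable []string
		var totalCPU int64
		for _, p := range pods {
			if !p.evictable {
				nonRemovable = append(nonRemovable, p.name)
				if p.bestEffort {
					continue // nothing to add to the usage totals
				}
			}
			totalCPU += p.cpuRequest
		}
		fmt.Println(nonRemovable, totalCPU) // [kube-proxy scratch] 300
	}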

View File

@@ -44,7 +44,7 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
 	podsEvicted := 0
 	for _, node := range nodes {
 		glog.V(1).Infof("Processing node: %#v\n", node.Name)
-		pods, err := podutil.ListPodsOnANode(client, node)
+		pods, err := podutil.ListEvictablePodsOnNode(client, node)
 		if err != nil {
 			return 0
 		}

View File

@@ -19,7 +19,6 @@ package strategies
 import (
 	"testing"
-	"fmt"
 	"github.com/kubernetes-incubator/descheduler/test"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -34,6 +33,9 @@ func TestPodAntiAffinity(t *testing.T) {
 	p2 := test.BuildTestPod("p2", 100, 0, node.Name)
 	p3 := test.BuildTestPod("p3", 100, 0, node.Name)
 	p3.Labels = map[string]string{"foo": "bar"}
+	p1.Annotations = test.GetNormalPodAnnotation()
+	p2.Annotations = test.GetNormalPodAnnotation()
+	p3.Annotations = test.GetNormalPodAnnotation()
 	p1.Spec.Affinity = &v1.Affinity{
 		PodAntiAffinity: &v1.PodAntiAffinity{
 			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -80,7 +82,6 @@ func TestPodAntiAffinity(t *testing.T) {
 	expectedEvictedPodCount := 1
 	podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false)
 	if podsEvicted != expectedEvictedPodCount {
-		fmt.Println(podsEvicted)
 		t.Errorf("Unexpected no of pods evicted")
 	}
 }
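
The test pods are now given a creator annotation so that the new IsEvictable filter does not discard them (a pod whose CreatorRef cannot be resolved is treated as non-evictable). A self-contained sketch of parsing such a legacy created-by payload, assuming the kubernetes.io/created-by SerializedReference shape of that Kubernetes era; the exact value returned by test.GetNormalPodAnnotation is defined in the repo's test package:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	func main() {
		// Hypothetical annotation value; the reference fields are the same
		// three that FindDuplicatePods joins into its map key.
		ann := `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"ReplicaSet","namespace":"default","name":"rs-1"}}`

		var sr struct {
			Reference struct {
				Kind      string `json:"kind"`
				Namespace string `json:"namespace"`
				Name      string `json:"name"`
			} `json:"reference"`
		}
		if err := json.Unmarshal([]byte(ann), &sr); err != nil {
			panic(err)
		}
		fmt.Println(sr.Reference.Kind, sr.Reference.Namespace, sr.Reference.Name) // ReplicaSet default rs-1
	}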