
ListPodsOnANode: define Options type to pass various options

Options like:
- filter
- included/excluded namespaces
- labels
Author: Jan Chaloupka
Date:   2020-07-11 20:00:47 +02:00
parent 0006fb039d
commit 74f70fdbc9
10 changed files with 31 additions and 14 deletions
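
The commit swaps ListPodsOnANode's positional filter parameter for variadic functional options, so the options named above (filter, namespaces, labels) can be added later without touching every call site again. Below is a minimal sketch of the pattern, not a verbatim copy of the repository code: Options and WithFilter mirror the first diff, while WithIncludedNamespaces is a hypothetical illustration of how a namespace option could slot in and is not part of this commit.

package pod

import (
    v1 "k8s.io/api/core/v1"
)

// Options collects optional arguments for ListPodsOnANode.
type Options struct {
    filter             func(pod *v1.Pod) bool
    includedNamespaces []string // hypothetical future option, not in this commit
}

// WithFilter sets a pod filter; pods for which it returns false are skipped.
func WithFilter(filter func(pod *v1.Pod) bool) func(opts *Options) {
    return func(opts *Options) {
        opts.filter = filter
    }
}

// WithIncludedNamespaces (hypothetical) shows how the same pattern would carry
// a namespace restriction without changing the ListPodsOnANode signature.
func WithIncludedNamespaces(namespaces []string) func(opts *Options) {
    return func(opts *Options) {
        opts.includedNamespaces = namespaces
    }
}

Call sites opt in only to what they need, for example podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable)), as the strategy diffs below show; omitting all options reproduces the old behavior of passing a nil filter.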


@@ -27,17 +27,33 @@ import (
     "sigs.k8s.io/descheduler/pkg/utils"
 )
 
+type Options struct {
+    filter func(pod *v1.Pod) bool
+}
+
+// WithFilter sets a pod filter.
+// The filter function should return true if the pod should be returned from ListPodsOnANode
+func WithFilter(filter func(pod *v1.Pod) bool) func(opts *Options) {
+    return func(opts *Options) {
+        opts.filter = filter
+    }
+}
+
 // ListPodsOnANode lists all of the pods on a node
 // It also accepts an optional "filter" function which can be used to further limit the pods that are returned.
 // (Usually this is podEvictor.IsEvictable, in order to only list the evictable pods on a node, but can
 // be used by strategies to extend IsEvictable if there are further restrictions, such as with NodeAffinity).
 // The filter function should return true if the pod should be returned from ListPodsOnANode
 func ListPodsOnANode(
     ctx context.Context,
     client clientset.Interface,
     node *v1.Node,
-    filter func(pod *v1.Pod) bool,
+    opts ...func(opts *Options),
 ) ([]*v1.Pod, error) {
+    options := &Options{}
+    for _, opt := range opts {
+        opt(options)
+    }
     fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
     if err != nil {
         return []*v1.Pod{}, err
@@ -51,7 +67,7 @@ func ListPodsOnANode(
     pods := make([]*v1.Pod, 0)
     for i := range podList.Items {
-        if filter != nil && !filter(&podList.Items[i]) {
+        if options.filter != nil && !options.filter(&podList.Items[i]) {
             continue
         }
         pods = append(pods, &podList.Items[i])
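
For reference, a caller sketch under the new signature. This is illustrative only and not a file from this commit; listNodePods and the mirror-pod predicate are invented for the example, while the podutil calls match the API added above.

package example

import (
    "context"

    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"

    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// listNodePods demonstrates the two call styles: no options at all, or
// podutil.WithFilter wrapping an arbitrary predicate.
func listNodePods(ctx context.Context, client clientset.Interface, node *v1.Node) ([]*v1.Pod, []*v1.Pod, error) {
    // No options: every pod on the node that is not Succeeded or Failed.
    all, err := podutil.ListPodsOnANode(ctx, client, node)
    if err != nil {
        return nil, nil, err
    }

    // WithFilter: keep only pods the predicate accepts; here, skip mirror pods.
    nonMirror, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(func(pod *v1.Pod) bool {
        _, isMirror := pod.Annotations[v1.MirrorPodAnnotationKey]
        return !isMirror
    }))
    if err != nil {
        return nil, nil, err
    }
    return all, nonMirror, nil
}

Dropping every option is equivalent to the old nil filter, which is why the test and createNodePodsMap call sites below simply delete the trailing nil argument.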


@@ -67,7 +67,7 @@ func TestListPodsOnANode(t *testing.T) {
         }
         return true, nil, fmt.Errorf("Failed to list: %v", list)
     })
-    pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node, nil)
+    pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node)
     if len(pods) != testCase.expectedPodCount {
         t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
     }


@@ -46,7 +46,7 @@ func RemoveDuplicatePods(
 ) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v", node.Name)
-        pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
+        pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable))
         if err != nil {
             klog.Errorf("error listing evictable pods on node %s: %+v", node.Name, err)
             continue


@@ -326,7 +326,7 @@ func sortNodesByUsage(nodes []NodeUsageMap) {
 func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {
     npm := NodePodsMap{}
     for _, node := range nodes {
-        pods, err := podutil.ListPodsOnANode(ctx, client, node, nil)
+        pods, err := podutil.ListPodsOnANode(ctx, client, node)
         if err != nil {
             klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
         } else {


@@ -43,11 +43,11 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v\n", node.Name)
-        pods, err := podutil.ListPodsOnANode(ctx, client, node, func(pod *v1.Pod) bool {
+        pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(func(pod *v1.Pod) bool {
             return podEvictor.IsEvictable(pod) &&
                 !nodeutil.PodFitsCurrentNode(pod, node) &&
                 nodeutil.PodFitsAnyNode(pod, nodes)
-        })
+        }))
         if err != nil {
             klog.Errorf("failed to get pods from %v: %v", node.Name, err)
         }


@@ -33,7 +33,7 @@ import (
 func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v\n", node.Name)
-        pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
+        pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable))
         if err != nil {
             //no pods evicted as error encountered retrieving evictable Pods
             return


@@ -18,6 +18,7 @@ package strategies
 import (
     "context"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -33,7 +34,7 @@ import (
 func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v\n", node.Name)
-        pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
+        pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable))
         if err != nil {
             return
         }


@@ -54,7 +54,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
 }
 
 func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, maxAge uint, evictor *evictions.PodEvictor) []*v1.Pod {
-    pods, err := podutil.ListPodsOnANode(ctx, client, node, evictor.IsEvictable)
+    pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(evictor.IsEvictable))
     if err != nil {
         return nil
     }


@@ -38,7 +38,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
     }
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %s", node.Name)
-        pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
+        pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(podEvictor.IsEvictable))
         if err != nil {
             klog.Errorf("Error when list pods at node %s", node.Name)
             continue


@@ -324,7 +324,7 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
             continue
         }
         // List all the pods on the current Node
-        podsOnANode, err := podutil.ListPodsOnANode(ctx, clientSet, node, podEvictor.IsEvictable)
+        podsOnANode, err := podutil.ListPodsOnANode(ctx, clientSet, node, podutil.WithFilter(podEvictor.IsEvictable))
         if err != nil {
             t.Errorf("Error listing pods on a node %v", err)
         }
@@ -336,7 +336,7 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
     }
     t.Log("Eviction of pods starting")
     startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer, podEvictor)
-    podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, leastLoadedNode, podEvictor.IsEvictable)
+    podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, leastLoadedNode, podutil.WithFilter(podEvictor.IsEvictable))
     if err != nil {
         t.Errorf("Error listing pods on a node %v", err)
     }