Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-25 20:59:28 +01:00
@@ -55,7 +55,7 @@ func TestTaintsUpdated(t *testing.T) {
	if err != nil {
		t.Fatalf("Unable to run descheduler strategies: %v", err)
	}
-	case <-time.After(300 * time.Millisecond):
+	case <-time.After(1 * time.Second):
	// Wait for few cycles and then verify the only pod still exists
	}
@@ -18,16 +18,15 @@ package pod

import (
	"context"
	"fmt"
	"reflect"
	"strings"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/test"
)
@@ -39,19 +38,17 @@ var (

func TestListPodsOnANode(t *testing.T) {
	testCases := []struct {
		name             string
-		pods             map[string][]v1.Pod
+		pods             []*v1.Pod
		node             *v1.Node
		labelSelector    *metav1.LabelSelector
		expectedPodCount int
	}{
		{
			name: "test listing pods on a node",
-			pods: map[string][]v1.Pod{
-				"n1": {
-					*test.BuildTestPod("pod1", 100, 0, "n1", nil),
-					*test.BuildTestPod("pod2", 100, 0, "n1", nil),
-				},
-				"n2": {*test.BuildTestPod("pod3", 100, 0, "n2", nil)},
+			pods: []*v1.Pod{
+				test.BuildTestPod("pod1", 100, 0, "n1", nil),
+				test.BuildTestPod("pod2", 100, 0, "n1", nil),
+				test.BuildTestPod("pod3", 100, 0, "n2", nil),
			},
			node:          test.BuildTestNode("n1", 2000, 3000, 10, nil),
			labelSelector: nil,
@@ -59,17 +56,15 @@ func TestListPodsOnANode(t *testing.T) {
		},
		{
			name: "test listing pods with label selector",
-			pods: map[string][]v1.Pod{
-				"n1": {
-					*test.BuildTestPod("pod1", 100, 0, "n1", nil),
-					*test.BuildTestPod("pod2", 100, 0, "n1", func(pod *v1.Pod) {
-						pod.Labels = map[string]string{"foo": "bar"}
-					}),
-					*test.BuildTestPod("pod3", 100, 0, "n1", func(pod *v1.Pod) {
-						pod.Labels = map[string]string{"foo": "bar1"}
-					}),
-				},
-				"n2": {*test.BuildTestPod("pod4", 100, 0, "n2", nil)},
+			pods: []*v1.Pod{
+				test.BuildTestPod("pod1", 100, 0, "n1", nil),
+				test.BuildTestPod("pod2", 100, 0, "n1", func(pod *v1.Pod) {
+					pod.Labels = map[string]string{"foo": "bar"}
+				}),
+				test.BuildTestPod("pod3", 100, 0, "n1", func(pod *v1.Pod) {
+					pod.Labels = map[string]string{"foo": "bar1"}
+				}),
+				test.BuildTestPod("pod4", 100, 0, "n2", nil),
			},
			node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
			labelSelector: &metav1.LabelSelector{
@@ -85,21 +80,38 @@ func TestListPodsOnANode(t *testing.T) {
		},
	}
	for _, testCase := range testCases {
-		fakeClient := &fake.Clientset{}
-		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-			list := action.(core.ListAction)
-			fieldString := list.GetListRestrictions().Fields.String()
-			if strings.Contains(fieldString, "n1") {
-				return true, &v1.PodList{Items: testCase.pods["n1"]}, nil
-			} else if strings.Contains(fieldString, "n2") {
-				return true, &v1.PodList{Items: testCase.pods["n2"]}, nil
+		t.Run(testCase.name, func(t *testing.T) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var objs []runtime.Object
+			objs = append(objs, testCase.node)
+			for _, pod := range testCase.pods {
+				objs = append(objs, pod)
+			}
+			fakeClient := fake.NewSimpleClientset(objs...)
+
+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods()
+
+			getPodsAssignedToNode, err := BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+			filter, err := NewOptions().WithLabelSelector(testCase.labelSelector).BuildFilterFunc()
+			if err != nil {
+				t.Errorf("Build filter function error: %v", err)
+			}
+
+			pods, _ := ListPodsOnANode(testCase.node.Name, getPodsAssignedToNode, filter)
+			if len(pods) != testCase.expectedPodCount {
+				t.Errorf("Expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
			}
-			return true, nil, fmt.Errorf("Failed to list: %v", list)
		})
-		pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node, WithLabelSelector(testCase.labelSelector))
-		if len(pods) != testCase.expectedPodCount {
-			t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
-		}
	}
}
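The rewritten test body above is the template the rest of this commit follows: instead of stubbing `list` calls with reactors on a bare `fake.Clientset`, each test seeds `fake.NewSimpleClientset` with the node and pod objects, starts a shared informer factory, and resolves pods through `BuildGetPodsAssignedToNodeFunc`. A minimal sketch of that scaffolding, pulled into a hypothetical helper (`newAssignedPodsFunc` is not part of the diff; it assumes the `GetPodsAssignedToNodeFunc` type that `BuildGetPodsAssignedToNodeFunc` returns, and uses the informer calls exactly as they appear above):

```go
package pod

import (
	"context"
	"testing"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

// newAssignedPodsFunc seeds a fake clientset with the given objects, syncs a
// pod informer against it, and returns the node-indexed pod lookup that the
// rewritten strategies consume. The helper name is illustrative only.
func newAssignedPodsFunc(ctx context.Context, t *testing.T, objs ...runtime.Object) GetPodsAssignedToNodeFunc {
	t.Helper()

	fakeClient := fake.NewSimpleClientset(objs...)
	factory := informers.NewSharedInformerFactory(fakeClient, 0)
	podInformer := factory.Core().V1().Pods()

	getPodsAssignedToNode, err := BuildGetPodsAssignedToNodeFunc(podInformer)
	if err != nil {
		t.Fatalf("Build get pods assigned to node function error: %v", err)
	}

	// Start and sync before the first lookup; an unsynced informer cache
	// would make every test observe zero pods on every node.
	factory.Start(ctx.Done())
	factory.WaitForCacheSync(ctx.Done())

	return getPodsAssignedToNode
}
```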
@@ -25,10 +25,12 @@ import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/test"
)

@@ -43,7 +45,6 @@ func buildTestPodWithImage(podName, node, image string) *v1.Pod {
}

func TestFindDuplicatePods(t *testing.T) {
-	ctx := context.Background()
	// first setup pods
	node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
@@ -173,91 +174,91 @@ func TestFindDuplicatePods(t *testing.T) {

	testCases := []struct {
		description             string
-		pods                    []v1.Pod
+		pods                    []*v1.Pod
		nodes                   []*v1.Node
		expectedEvictedPodCount uint
		strategy                api.DeschedulerStrategy
	}{
		{
			description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
-			pods:        []v1.Pod{*p1, *p2, *p3},
+			pods:        []*v1.Pod{p1, p2, p3},
			nodes:       []*v1.Node{node1, node2},
			expectedEvictedPodCount: 1,
			strategy:                api.DeschedulerStrategy{},
		},
		{
			description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
-			pods:        []v1.Pod{*p1, *p2, *p3},
+			pods:        []*v1.Pod{p1, p2, p3},
			nodes:       []*v1.Node{node1, node2},
			expectedEvictedPodCount: 0,
			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
		},
		{
			description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
-			pods:        []v1.Pod{*p8, *p9, *p10},
+			pods:        []*v1.Pod{p8, p9, p10},
			nodes:       []*v1.Node{node1, node2},
			expectedEvictedPodCount: 1,
			strategy:                api.DeschedulerStrategy{},
		},
		{
			description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
-			pods:        []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
+			pods:        []*v1.Pod{p1, p2, p3, p8, p9, p10},
			nodes:       []*v1.Node{node1, node2},
			expectedEvictedPodCount: 2,
			strategy:                api.DeschedulerStrategy{},
		},
		{
			description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
-			pods:        []v1.Pod{*p4, *p5, *p6, *p7},
+			pods:        []*v1.Pod{p4, p5, p6, p7},
			nodes:       []*v1.Node{node1, node2},
			expectedEvictedPodCount: 0,
			strategy:                api.DeschedulerStrategy{},
		},
		{
			description: "Test all Pods: 4 should be evicted.",
-			pods:        []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
+			pods:        []*v1.Pod{p1, p2, p3, p4, p5, p6, p7, p8, p9, p10},
			nodes:       []*v1.Node{node1, node2},
			expectedEvictedPodCount: 2,
			strategy:                api.DeschedulerStrategy{},
		},
		{
			description: "Pods with the same owner but different images should not be evicted",
-			pods:        []v1.Pod{*p11, *p12},
+			pods:        []*v1.Pod{p11, p12},
			nodes:       []*v1.Node{node1, node2},
			expectedEvictedPodCount: 0,
			strategy:                api.DeschedulerStrategy{},
		},
		{
			description: "Pods with multiple containers should not match themselves",
-			pods:        []v1.Pod{*p13},
+			pods:        []*v1.Pod{p13},
			nodes:       []*v1.Node{node1, node2},
			expectedEvictedPodCount: 0,
			strategy:                api.DeschedulerStrategy{},
		},
		{
			description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
-			pods:        []v1.Pod{*p11, *p13},
+			pods:        []*v1.Pod{p11, p13},
			nodes:       []*v1.Node{node1, node2},
			expectedEvictedPodCount: 0,
			strategy:                api.DeschedulerStrategy{},
		},
		{
			description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
-			pods:        []v1.Pod{*p1, *p2, *p3},
+			pods:        []*v1.Pod{p1, p2, p3},
			nodes:       []*v1.Node{node1, node3},
			expectedEvictedPodCount: 0,
			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
		},
		{
			description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
-			pods:        []v1.Pod{*p15, *p16, *p17},
+			pods:        []*v1.Pod{p15, p16, p17},
			nodes:       []*v1.Node{node1, node4},
			expectedEvictedPodCount: 0,
			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
		},
		{
			description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
-			pods:        []v1.Pod{*p1, *p2, *p3},
+			pods:        []*v1.Pod{p1, p2, p3},
			nodes:       []*v1.Node{node1, node5},
			expectedEvictedPodCount: 0,
			strategy:                api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
@@ -266,10 +267,29 @@ func TestFindDuplicatePods(t *testing.T) {

	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
-			fakeClient := &fake.Clientset{}
-			fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-				return true, &v1.PodList{Items: testCase.pods}, nil
-			})
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var objs []runtime.Object
+			for _, node := range testCase.nodes {
+				objs = append(objs, node)
+			}
+			for _, pod := range testCase.pods {
+				objs = append(objs, pod)
+			}
+			fakeClient := fake.NewSimpleClientset(objs...)
+
+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods()
+
+			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				"v1",
@@ -282,7 +302,7 @@ func TestFindDuplicatePods(t *testing.T) {
				false,
			)

-			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor)
+			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
			podsEvicted := podEvictor.TotalEvicted()
			if podsEvicted != testCase.expectedEvictedPodCount {
				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
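A note on why every fixture in this commit changes from `[]v1.Pod` to `[]*v1.Pod`: the seeded objects must be passed to `fake.NewSimpleClientset` as `runtime.Object`, and in client-go only the pointer type `*v1.Pod` satisfies that interface (its generated `DeepCopyObject` has a pointer receiver). A small sketch of the seeding step under that assumption (`seedObjects` is an illustrative name, not a helper from the diff):

```go
import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// seedObjects collects nodes and pods into the runtime.Object slice that
// fake.NewSimpleClientset expects. Appending a v1.Pod value instead of a
// *v1.Pod pointer would not compile, which is what forces the []*v1.Pod
// fixtures throughout this commit.
func seedObjects(nodes []*v1.Node, pods []*v1.Pod) []runtime.Object {
	objs := make([]runtime.Object, 0, len(nodes)+len(pods))
	for _, node := range nodes {
		objs = append(objs, node)
	}
	for _, pod := range pods {
		objs = append(objs, pod)
	}
	return objs
}
```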
@@ -293,8 +313,6 @@ func TestFindDuplicatePods(t *testing.T) {
	}
}

func TestRemoveDuplicatesUniformly(t *testing.T) {
-	ctx := context.Background()
-
	setRSOwnerRef2 := func(pod *v1.Pod) {
		pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
			{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-2"},
@@ -420,24 +438,24 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {

	testCases := []struct {
		description             string
-		pods                    []v1.Pod
+		pods                    []*v1.Pod
		nodes                   []*v1.Node
		expectedEvictedPodCount uint
		strategy                api.DeschedulerStrategy
	}{
		{
			description: "Evict pods uniformly",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (5,3,1) -> (3,3,3) -> 2 evictions
-				*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
-				*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
-				*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
-				*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
+				test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
+				test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
+				test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
+				test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
			},
			expectedEvictedPodCount: 2,
			nodes: []*v1.Node{
@@ -449,17 +467,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
		},
		{
			description: "Evict pods uniformly with one node left out",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (5,3,1) -> (4,4,1) -> 1 eviction
-				*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
-				*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
-				*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
-				*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
+				test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
+				test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
+				test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
+				test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
			},
			expectedEvictedPodCount: 1,
			nodes: []*v1.Node{
@@ -470,17 +488,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
		},
		{
			description: "Evict pods uniformly with two replica sets",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (5,3,1) -> (3,3,3) -> 2 evictions
-				*test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef),
-				*test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef),
-				*test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef),
-				*test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef),
-				*test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef),
-				*test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef),
-				*test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef),
-				*test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef),
-				*test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef),
+				test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef),
+				test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef),
+				test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef),
+				test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef),
+				test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef),
+				test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef),
+				test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef),
+				test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef),
+				test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef),
			},
			expectedEvictedPodCount: 4,
			nodes: []*v1.Node{
@@ -492,27 +510,27 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
		},
		{
			description: "Evict pods uniformly with two owner references",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (5,3,1) -> (3,3,3) -> 2 evictions
-				*test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef),
-				*test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef),
-				*test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef),
-				*test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef),
+				test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef),
+				test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef),
+				test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef),
+				test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef),
				// (1,3,5) -> (3,3,3) -> 2 evictions
-				*test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2),
-				*test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2),
-				*test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2),
-				*test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2),
-				*test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2),
-				*test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2),
-				*test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2),
-				*test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2),
-				*test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2),
+				test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2),
+				test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2),
+				test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2),
+				test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2),
+				test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2),
+				test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2),
+				test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2),
+				test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2),
+				test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2),
			},
			expectedEvictedPodCount: 4,
			nodes: []*v1.Node{
@@ -524,10 +542,10 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
		},
		{
			description: "Evict pods with number of pods less than nodes",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (2,0,0) -> (1,1,0) -> 1 eviction
-				*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
-				*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
			},
			expectedEvictedPodCount: 1,
			nodes: []*v1.Node{
@@ -539,14 +557,14 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
		},
		{
			description: "Evict pods with number of pods less than nodes, but ignore different pods with the same ownerref",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (1, 0, 0) for "bar","baz" images -> no eviction, even with a matching ownerKey
				// (2, 0, 0) for "foo" image -> (1,1,0) - 1 eviction
				// In this case the only "real" duplicates are p1 and p4, so one of those should be evicted
-				*buildTestPodWithImage("p1", "n1", "foo"),
-				*buildTestPodWithImage("p2", "n1", "bar"),
-				*buildTestPodWithImage("p3", "n1", "baz"),
-				*buildTestPodWithImage("p4", "n1", "foo"),
+				buildTestPodWithImage("p1", "n1", "foo"),
+				buildTestPodWithImage("p2", "n1", "bar"),
+				buildTestPodWithImage("p3", "n1", "baz"),
+				buildTestPodWithImage("p4", "n1", "foo"),
			},
			expectedEvictedPodCount: 1,
			nodes: []*v1.Node{
@@ -558,9 +576,9 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
		},
		{
			description: "Evict pods with a single pod with three nodes",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (2,0,0) -> (1,1,0) -> 1 eviction
-				*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
+				test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
			},
			expectedEvictedPodCount: 0,
			nodes: []*v1.Node{
@@ -572,17 +590,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
		},
		{
			description: "Evict pods uniformly respecting taints",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
-				*test.BuildTestPod("p1", 100, 0, "worker1", setTolerationsK1),
-				*test.BuildTestPod("p2", 100, 0, "worker1", setTolerationsK2),
-				*test.BuildTestPod("p3", 100, 0, "worker1", setTolerationsK1),
-				*test.BuildTestPod("p4", 100, 0, "worker1", setTolerationsK2),
-				*test.BuildTestPod("p5", 100, 0, "worker1", setTolerationsK1),
-				*test.BuildTestPod("p6", 100, 0, "worker2", setTolerationsK2),
-				*test.BuildTestPod("p7", 100, 0, "worker2", setTolerationsK1),
-				*test.BuildTestPod("p8", 100, 0, "worker2", setTolerationsK2),
-				*test.BuildTestPod("p9", 100, 0, "worker3", setTolerationsK1),
+				test.BuildTestPod("p1", 100, 0, "worker1", setTolerationsK1),
+				test.BuildTestPod("p2", 100, 0, "worker1", setTolerationsK2),
+				test.BuildTestPod("p3", 100, 0, "worker1", setTolerationsK1),
+				test.BuildTestPod("p4", 100, 0, "worker1", setTolerationsK2),
+				test.BuildTestPod("p5", 100, 0, "worker1", setTolerationsK1),
+				test.BuildTestPod("p6", 100, 0, "worker2", setTolerationsK2),
+				test.BuildTestPod("p7", 100, 0, "worker2", setTolerationsK1),
+				test.BuildTestPod("p8", 100, 0, "worker2", setTolerationsK2),
+				test.BuildTestPod("p9", 100, 0, "worker3", setTolerationsK1),
			},
			expectedEvictedPodCount: 2,
			nodes: []*v1.Node{
@@ -597,17 +615,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
		},
		{
			description: "Evict pods uniformly respecting RequiredDuringSchedulingIgnoredDuringExecution node affinity",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
-				*test.BuildTestPod("p1", 100, 0, "worker1", setNotMasterNodeSelectorK1),
-				*test.BuildTestPod("p2", 100, 0, "worker1", setNotMasterNodeSelectorK2),
-				*test.BuildTestPod("p3", 100, 0, "worker1", setNotMasterNodeSelectorK1),
-				*test.BuildTestPod("p4", 100, 0, "worker1", setNotMasterNodeSelectorK2),
-				*test.BuildTestPod("p5", 100, 0, "worker1", setNotMasterNodeSelectorK1),
-				*test.BuildTestPod("p6", 100, 0, "worker2", setNotMasterNodeSelectorK2),
-				*test.BuildTestPod("p7", 100, 0, "worker2", setNotMasterNodeSelectorK1),
-				*test.BuildTestPod("p8", 100, 0, "worker2", setNotMasterNodeSelectorK2),
-				*test.BuildTestPod("p9", 100, 0, "worker3", setNotMasterNodeSelectorK1),
+				test.BuildTestPod("p1", 100, 0, "worker1", setNotMasterNodeSelectorK1),
+				test.BuildTestPod("p2", 100, 0, "worker1", setNotMasterNodeSelectorK2),
+				test.BuildTestPod("p3", 100, 0, "worker1", setNotMasterNodeSelectorK1),
+				test.BuildTestPod("p4", 100, 0, "worker1", setNotMasterNodeSelectorK2),
+				test.BuildTestPod("p5", 100, 0, "worker1", setNotMasterNodeSelectorK1),
+				test.BuildTestPod("p6", 100, 0, "worker2", setNotMasterNodeSelectorK2),
+				test.BuildTestPod("p7", 100, 0, "worker2", setNotMasterNodeSelectorK1),
+				test.BuildTestPod("p8", 100, 0, "worker2", setNotMasterNodeSelectorK2),
+				test.BuildTestPod("p9", 100, 0, "worker3", setNotMasterNodeSelectorK1),
			},
			expectedEvictedPodCount: 2,
			nodes: []*v1.Node{
@@ -622,17 +640,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
		},
		{
			description: "Evict pods uniformly respecting node selector",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
-				*test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
-				*test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
-				*test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
-				*test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
-				*test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
-				*test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
-				*test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
-				*test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
-				*test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
+				test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
+				test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
+				test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
+				test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
+				test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
+				test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
+				test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
+				test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
+				test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
			},
			expectedEvictedPodCount: 2,
			nodes: []*v1.Node{
@@ -647,17 +665,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
		},
		{
			description: "Evict pods uniformly respecting node selector with zero target nodes",
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
-				*test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
-				*test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
-				*test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
-				*test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
-				*test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
-				*test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
-				*test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
-				*test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
-				*test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
+				test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
+				test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
+				test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
+				test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
+				test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
+				test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
+				test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
+				test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
+				test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
			},
			expectedEvictedPodCount: 0,
			nodes: []*v1.Node{
@@ -674,10 +692,29 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {

	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
-			fakeClient := &fake.Clientset{}
-			fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-				return true, &v1.PodList{Items: testCase.pods}, nil
-			})
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var objs []runtime.Object
+			for _, node := range testCase.nodes {
+				objs = append(objs, node)
+			}
+			for _, pod := range testCase.pods {
+				objs = append(objs, pod)
+			}
+			fakeClient := fake.NewSimpleClientset(objs...)
+
+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods()
+
+			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				policyv1.SchemeGroupVersion.String(),
@@ -690,7 +727,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
				false,
			)

-			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor)
+			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
			podsEvicted := podEvictor.TotalEvicted()
			if podsEvicted != testCase.expectedEvictedPodCount {
				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
@@ -4,15 +4,16 @@ import (
	"context"
	"testing"

-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
	v1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/test"
)
@@ -21,8 +22,6 @@ var (
)

func TestRemoveFailedPods(t *testing.T) {
-	ctx := context.Background()
-
	createStrategy := func(enabled, includingInitContainers bool, reasons, excludeKinds []string, minAgeSeconds *uint, nodeFit bool) api.DeschedulerStrategy {
		return api.DeschedulerStrategy{
			Enabled: enabled,
@@ -43,28 +42,28 @@ func TestRemoveFailedPods(t *testing.T) {
		nodes                   []*v1.Node
		strategy                api.DeschedulerStrategy
		expectedEvictedPodCount uint
-		pods                    []v1.Pod
+		pods                    []*v1.Pod
	}{
		{
			description:             "default empty strategy, 0 failures, 0 evictions",
			strategy:                api.DeschedulerStrategy{},
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 0,
-			pods:                    []v1.Pod{}, // no pods come back with field selector phase=Failed
+			pods:                    []*v1.Pod{}, // no pods come back with field selector phase=Failed
		},
		{
			description:             "0 failures, 0 evictions",
			strategy:                createStrategy(true, false, nil, nil, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 0,
-			pods:                    []v1.Pod{}, // no pods come back with field selector phase=Failed
+			pods:                    []*v1.Pod{}, // no pods come back with field selector phase=Failed
		},
		{
			description:             "1 container terminated with reason NodeAffinity, 1 eviction",
			strategy:                createStrategy(true, false, nil, nil, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 1,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", nil, &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
				}, nil),
@@ -75,7 +74,7 @@ func TestRemoveFailedPods(t *testing.T) {
			strategy:                createStrategy(true, true, nil, nil, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 1,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
				}, nil, nil),
@@ -86,7 +85,7 @@ func TestRemoveFailedPods(t *testing.T) {
			strategy:                createStrategy(true, true, nil, nil, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 1,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", &v1.ContainerState{
					Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerConfigError"},
				}, nil, nil),
@@ -100,7 +99,7 @@ func TestRemoveFailedPods(t *testing.T) {
				test.BuildTestNode("node2", 2000, 3000, 10, nil),
			},
			expectedEvictedPodCount: 2,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
				}, nil, nil),
@@ -114,7 +113,7 @@ func TestRemoveFailedPods(t *testing.T) {
			strategy:                createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 1,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", nil, &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
				}, nil),
@@ -125,7 +124,7 @@ func TestRemoveFailedPods(t *testing.T) {
			strategy:                createStrategy(true, false, []string{"CreateContainerConfigError", "NodeAffinity"}, nil, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 1,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", nil, &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
				}, nil),
@@ -136,7 +135,7 @@ func TestRemoveFailedPods(t *testing.T) {
			strategy:                createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 0,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", nil, &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
				}, nil),
@@ -147,7 +146,7 @@ func TestRemoveFailedPods(t *testing.T) {
			strategy:                createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 0,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", &v1.ContainerState{
					Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerConfigError"},
				}, nil, nil),
@@ -158,7 +157,7 @@ func TestRemoveFailedPods(t *testing.T) {
			strategy:                createStrategy(true, false, nil, nil, &OneHourInSeconds, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 0,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", nil, &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
				}, nil),
@@ -171,7 +170,7 @@ func TestRemoveFailedPods(t *testing.T) {
				node.Spec.Unschedulable = true
			})},
			expectedEvictedPodCount: 0,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", nil, &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
				}, nil),
@@ -182,7 +181,7 @@ func TestRemoveFailedPods(t *testing.T) {
			strategy:                createStrategy(true, true, nil, []string{"ReplicaSet"}, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 0,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
				}, nil, nil),
@@ -193,7 +192,7 @@ func TestRemoveFailedPods(t *testing.T) {
			strategy:                createStrategy(true, true, nil, []string{"DaemonSet"}, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 1,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
				}, nil, nil),
@@ -204,7 +203,7 @@ func TestRemoveFailedPods(t *testing.T) {
			strategy:                createStrategy(true, true, nil, []string{"DaemonSet"}, nil, false),
			nodes:                   []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
			expectedEvictedPodCount: 0,
-			pods: []v1.Pod{
+			pods: []*v1.Pod{
				buildTestPod("p1", "node1", &v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
				}, nil, &metav1.Time{}),
@@ -212,28 +211,48 @@ func TestRemoveFailedPods(t *testing.T) {
		},
	}
	for _, tc := range tests {
-		fakeClient := &fake.Clientset{}
-		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-			return true, &v1.PodList{Items: tc.pods}, nil
+		t.Run(tc.description, func(t *testing.T) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var objs []runtime.Object
+			for _, node := range tc.nodes {
+				objs = append(objs, node)
+			}
+			for _, pod := range tc.pods {
+				objs = append(objs, pod)
+			}
+			fakeClient := fake.NewSimpleClientset(objs...)
+
+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods()
+
+			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+			podEvictor := evictions.NewPodEvictor(
+				fakeClient,
+				policyv1.SchemeGroupVersion.String(),
+				false,
+				nil,
+				nil,
+				tc.nodes,
+				false,
+				false,
+				false,
+			)
+
+			RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+			actualEvictedPodCount := podEvictor.TotalEvicted()
+			if actualEvictedPodCount != tc.expectedEvictedPodCount {
+				t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
+			}
		})
-
-		podEvictor := evictions.NewPodEvictor(
-			fakeClient,
-			policyv1.SchemeGroupVersion.String(),
-			false,
-			nil,
-			nil,
-			tc.nodes,
-			false,
-			false,
-			false,
-		)
-
-		RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
-		actualEvictedPodCount := podEvictor.TotalEvicted()
-		if actualEvictedPodCount != tc.expectedEvictedPodCount {
-			t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
-		}
	}
}
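The other structural change in this loop is wrapping each table case in `t.Run`, so every case reports under its own description, gets an isolated cancellable context, and shuts down its informer factory independently via `defer cancel()`. The skeleton, with placeholder fixture logic and a hypothetical test name:

```go
package strategies

import (
	"context"
	"testing"
)

func TestSubtestPattern(t *testing.T) { // illustrative name, not from the diff
	tests := []struct{ description string }{
		{description: "example case"},
	}
	for _, tc := range tests {
		tc := tc // capture the loop variable for the closure (needed before Go 1.22)
		t.Run(tc.description, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel() // stops the informers started for this case

			_ = ctx // per-case clientset, informer sync, and assertions go here
		})
	}
}
```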
@@ -273,7 +292,7 @@ func TestValidRemoveFailedPodsParams(t *testing.T) {
	}
}

-func buildTestPod(podName, nodeName string, initContainerState, containerState *v1.ContainerState, deletionTimestamp *metav1.Time) v1.Pod {
+func buildTestPod(podName, nodeName string, initContainerState, containerState *v1.ContainerState, deletionTimestamp *metav1.Time) *v1.Pod {
	pod := test.BuildTestPod(podName, 1, 1, nodeName, func(p *v1.Pod) {
		ps := v1.PodStatus{}

@@ -292,5 +311,5 @@ func buildTestPod(podName, nodeName string, initContainerState, containerState *v1.ContainerState, deletionTimestamp *metav1.Time) *v1.Pod {
	pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
	pod.ObjectMeta.SetCreationTimestamp(metav1.Now())
	pod.DeletionTimestamp = deletionTimestamp
-	return *pod
+	return pod
}
@@ -24,15 +24,16 @@ import (
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/test"
)

func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
-	ctx := context.Background()
	requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
		Enabled: true,
		Params: &api.StrategyParameters{
@@ -63,7 +64,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
	unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
	unschedulableNodeWithLabels.Spec.Unschedulable = true

-	addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time) []v1.Pod {
+	addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time) []*v1.Pod {
		podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
		podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
@@ -95,10 +96,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
		pod1.DeletionTimestamp = deletionTimestamp
		pod2.DeletionTimestamp = deletionTimestamp

-		return []v1.Pod{
-			*podWithNodeAffinity,
-			*pod1,
-			*pod2,
+		return []*v1.Pod{
+			podWithNodeAffinity,
+			pod1,
+			pod2,
		}
	}
@@ -106,7 +107,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
	tests := []struct {
		description             string
		nodes                   []*v1.Node
-		pods                    []v1.Pod
+		pods                    []*v1.Pod
		strategy                api.DeschedulerStrategy
		expectedEvictedPodCount uint
		maxPodsToEvictPerNode   *uint
@@ -190,28 +191,47 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
	}

	for _, tc := range tests {
		t.Run(tc.description, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

-			fakeClient := &fake.Clientset{}
-			fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-				return true, &v1.PodList{Items: tc.pods}, nil
+			var objs []runtime.Object
+			for _, node := range tc.nodes {
+				objs = append(objs, node)
+			}
+			for _, pod := range tc.pods {
+				objs = append(objs, pod)
+			}
+			fakeClient := fake.NewSimpleClientset(objs...)
+
+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods()
+
+			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+			podEvictor := evictions.NewPodEvictor(
+				fakeClient,
+				policyv1.SchemeGroupVersion.String(),
+				false,
+				tc.maxPodsToEvictPerNode,
+				tc.maxNoOfPodsToEvictPerNamespace,
+				tc.nodes,
+				false,
+				false,
+				false,
+			)
+
+			RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+			actualEvictedPodCount := podEvictor.TotalEvicted()
+			if actualEvictedPodCount != tc.expectedEvictedPodCount {
+				t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
+			}
		})
-
-		podEvictor := evictions.NewPodEvictor(
-			fakeClient,
-			policyv1.SchemeGroupVersion.String(),
-			false,
-			tc.maxPodsToEvictPerNode,
-			tc.maxNoOfPodsToEvictPerNamespace,
-			tc.nodes,
-			false,
-			false,
-			false,
-		)
-
-		RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
-		actualEvictedPodCount := podEvictor.TotalEvicted()
-		if actualEvictedPodCount != tc.expectedEvictedPodCount {
-			t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
-		}
	}
}
@@ -9,10 +9,12 @@ import (
	policyv1 "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/test"
)
@@ -45,11 +47,10 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
}

func TestDeletePodsViolatingNodeTaints(t *testing.T) {
-	ctx := context.Background()
	node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
	node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
-	node1 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
+	node2 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})

	node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
		node.ObjectMeta.Labels = map[string]string{
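Besides the pointer migration, this hunk fixes a real bug in the fixture setup: the old second call assigned the result of `addTaintsToNode(node2, ...)` back to `node1`, clobbering node1's `testTaint` and leaving `node2` as the untainted original, so the node2-based cases below were not actually exercising the taint they claimed to test.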
@@ -121,7 +122,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
	tests := []struct {
		description             string
		nodes                   []*v1.Node
-		pods                    []v1.Pod
+		pods                    []*v1.Pod
		evictLocalStoragePods   bool
		evictSystemCriticalPods bool
		maxPodsToEvictPerNode   *uint
@@ -132,7 +133,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {

		{
			description:             "Pods not tolerating node taint should be evicted",
-			pods:                    []v1.Pod{*p1, *p2, *p3},
+			pods:                    []*v1.Pod{p1, p2, p3},
			nodes:                   []*v1.Node{node1},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
@@ -140,7 +141,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
		},
		{
			description:             "Pods with tolerations but not tolerating node taint should be evicted",
-			pods:                    []v1.Pod{*p1, *p3, *p4},
+			pods:                    []*v1.Pod{p1, p3, p4},
			nodes:                   []*v1.Node{node1},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
@@ -148,7 +149,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
		},
		{
			description:             "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
-			pods:                    []v1.Pod{*p1, *p5, *p6},
+			pods:                    []*v1.Pod{p1, p5, p6},
			nodes:                   []*v1.Node{node1},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
@@ -157,7 +158,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
		},
		{
			description:             "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
-			pods:                    []v1.Pod{*p1, *p5, *p6},
+			pods:                    []*v1.Pod{p1, p5, p6},
			nodes:                   []*v1.Node{node1},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
@@ -166,7 +167,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
		},
		{
			description:             "Critical pods not tolerating node taint should not be evicted",
-			pods:                    []v1.Pod{*p7, *p8, *p9, *p10},
+			pods:                    []*v1.Pod{p7, p8, p9, p10},
			nodes:                   []*v1.Node{node2},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
@@ -174,7 +175,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
		},
		{
			description:             "Critical pods except storage pods not tolerating node taint should not be evicted",
-			pods:                    []v1.Pod{*p7, *p8, *p9, *p10},
+			pods:                    []*v1.Pod{p7, p8, p9, p10},
			nodes:                   []*v1.Node{node2},
			evictLocalStoragePods:   true,
			evictSystemCriticalPods: false,
@@ -182,7 +183,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
		},
		{
			description:             "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
-			pods:                    []v1.Pod{*p7, *p8, *p10, *p11},
+			pods:                    []*v1.Pod{p7, p8, p10, p11},
			nodes:                   []*v1.Node{node2},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
@@ -190,15 +191,15 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
		},
		{
			description:             "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical",
-			pods:                    []v1.Pod{*p2, *p7, *p9, *p10},
-			nodes:                   []*v1.Node{node2},
+			pods:                    []*v1.Pod{p2, p7, p9, p10},
+			nodes:                   []*v1.Node{node1, node2},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: true,
			expectedEvictedPodCount: 2, //p2 and p7 are evicted
		},
		{
			description:             "Pod p2 doesn't tolerate taint on it's node, but also doesn't tolerate taints on other nodes",
-			pods:                    []v1.Pod{*p1, *p2, *p3},
+			pods:                    []*v1.Pod{p1, p2, p3},
			nodes:                   []*v1.Node{node1, node2},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
@@ -207,7 +208,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
		},
		{
			description:             "Pod p12 doesn't tolerate taint on it's node, but other nodes don't match it's selector",
-			pods:                    []v1.Pod{*p1, *p3, *p12},
+			pods:                    []*v1.Pod{p1, p3, p12},
			nodes:                   []*v1.Node{node1, node3},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
@@ -216,7 +217,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
		},
		{
			description:             "Pod p2 doesn't tolerate taint on it's node, but other nodes are unschedulable",
-			pods:                    []v1.Pod{*p1, *p2, *p3},
+			pods:                    []*v1.Pod{p1, p2, p3},
			nodes:                   []*v1.Node{node1, node4},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
@@ -226,38 +227,56 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
	}

	for _, tc := range tests {
		t.Run(tc.description, func(t *testing.T) {
-
-			// create fake client
-			fakeClient := &fake.Clientset{}
-			fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-				return true, &v1.PodList{Items: tc.pods}, nil
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var objs []runtime.Object
+			for _, node := range tc.nodes {
+				objs = append(objs, node)
+			}
+			for _, pod := range tc.pods {
+				objs = append(objs, pod)
+			}
+			fakeClient := fake.NewSimpleClientset(objs...)
+
+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods()
+
+			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+			podEvictor := evictions.NewPodEvictor(
+				fakeClient,
+				policyv1.SchemeGroupVersion.String(),
+				false,
+				tc.maxPodsToEvictPerNode,
+				tc.maxNoOfPodsToEvictPerNamespace,
+				tc.nodes,
+				tc.evictLocalStoragePods,
+				tc.evictSystemCriticalPods,
+				false,
+			)
+
+			strategy := api.DeschedulerStrategy{
+				Params: &api.StrategyParameters{
+					NodeFit: tc.nodeFit,
+				},
+			}
+
+			RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+			actualEvictedPodCount := podEvictor.TotalEvicted()
+			if actualEvictedPodCount != tc.expectedEvictedPodCount {
+				t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
+			}
		})
-
-		podEvictor := evictions.NewPodEvictor(
-			fakeClient,
-			policyv1.SchemeGroupVersion.String(),
-			false,
-			tc.maxPodsToEvictPerNode,
-			tc.maxNoOfPodsToEvictPerNamespace,
-			tc.nodes,
-			tc.evictLocalStoragePods,
-			tc.evictSystemCriticalPods,
-			false,
-		)
-
-		strategy := api.DeschedulerStrategy{
-			Params: &api.StrategyParameters{
-				NodeFit: tc.nodeFit,
-			},
-		}
-
-		RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor)
-		actualEvictedPodCount := podEvictor.TotalEvicted()
-		if actualEvictedPodCount != tc.expectedEvictedPodCount {
-			t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
-		}
	}
}

func TestToleratesTaint(t *testing.T) {
@@ -19,23 +19,24 @@ package nodeutilization

import (
	"context"
	"fmt"
	"strings"
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/test"
)

func TestHighNodeUtilization(t *testing.T) {
-	ctx := context.Background()
	n1NodeName := "n1"
	n2NodeName := "n2"
	n3NodeName := "n3"
@@ -46,8 +47,8 @@ func TestHighNodeUtilization(t *testing.T) {
	testCases := []struct {
		name                string
		thresholds          api.ResourceThresholds
-		nodes               map[string]*v1.Node
-		pods                map[string]*v1.PodList
+		nodes               []*v1.Node
+		pods                []*v1.Pod
		expectedPodsEvicted uint
		evictedPods         []string
	}{
@@ -57,36 +58,24 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
@@ -96,54 +85,42 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
v1.ResourceCPU: 40,
|
||||
v1.ResourcePods: 40,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetDSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
@@ -153,31 +130,19 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These can't be evicted.
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These can't be evicted.
|
||||
*test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p3", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
// These can't be evicted.
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These can't be evicted.
|
||||
test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
@@ -187,38 +152,26 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 2,
|
||||
evictedPods: []string{"p1", "p7"},
|
||||
@@ -229,31 +182,19 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 2000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 2000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
},
},
n2NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 1,
},
@@ -262,39 +203,27 @@ func TestHighNodeUtilization(t *testing.T) {
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, lowPriority)
}),
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, highPriority)
}),
},
},
n2NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p7", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p8", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetDSOwnerRef),
},
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, lowPriority)
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, highPriority)
}),
// These won't be evicted.
test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p7", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p8", 400, 0, n2NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetDSOwnerRef),
},
expectedPodsEvicted: 1,
evictedPods: []string{"p1"},
@@ -304,36 +233,25 @@ func TestHighNodeUtilization(t *testing.T) {
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 3000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 3000, 3000, 5, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 3000, 3000, 10, test.SetNodeUnschedulable),
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 3000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 3000, 3000, 5, nil),
test.BuildTestNode(n3NodeName, 3000, 3000, 10, test.SetNodeUnschedulable),
},
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
},
},
n2NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{},
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
// These won't be evicted.
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 1,
evictedPods: []string{"p1"},
@@ -344,56 +262,41 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 20,
extendedResource: 40,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8)
}),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8)
}),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
// These won't be evicted
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetDSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
},
},
n3NodeName: {
Items: []v1.Pod{},
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
// These won't be evicted
test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
},
expectedPodsEvicted: 2,
evictedPods: []string{"p1", "p2"},
@@ -404,38 +307,27 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 40,
extendedResource: 40,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8)
}),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
//These won't be evicted
*test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 500, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 500, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 500, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{},
},
pods: []*v1.Pod{
//These won't be evicted
test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 500, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 500, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 500, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 0,
},
@@ -445,34 +337,26 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
// A pod selecting nodes in the "west" datacenter
test.SetRSOwnerRef(pod)
pod.Spec.NodeSelector = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
},
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
// A pod selecting nodes in the "west" datacenter
test.SetRSOwnerRef(pod)
pod.Spec.NodeSelector = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
},
expectedPodsEvicted: 1,
},
@@ -482,66 +366,56 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
// A pod selecting nodes in the "west" datacenter
test.SetRSOwnerRef(pod)
pod.Spec.NodeSelector = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
},
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
// A pod selecting nodes in the "west" datacenter
test.SetRSOwnerRef(pod)
pod.Spec.NodeSelector = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
},
expectedPodsEvicted: 0,
},
}

for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, n1NodeName) {
return true, test.pods[n1NodeName], nil
}
if strings.Contains(fieldString, n2NodeName) {
return true, test.pods[n2NodeName], nil
}
if strings.Contains(fieldString, n3NodeName) {
return true, test.pods[n3NodeName], nil
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
if node, exists := test.nodes[getAction.GetName()]; exists {
return true, node, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

var objs []runtime.Object
for _, node := range testCase.nodes {
objs = append(objs, node)
}
for _, pod := range testCase.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)

sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}

podsForEviction := make(map[string]struct{})
for _, pod := range test.evictedPods {
for _, pod := range testCase.evictedPods {
podsForEviction[pod] = struct{}{}
}

evictionFailed := false
if len(test.evictedPods) > 0 {
if len(testCase.evictedPods) > 0 {
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.CreateAction)
obj := getAction.GetObject()
@@ -556,10 +430,31 @@ func TestHighNodeUtilization(t *testing.T) {
})
}
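The body of that "create" reactor is cut off by the hunk boundary above. For orientation, a minimal sketch of the shape such eviction-checking reactors take in these tests; the Eviction type assertion and the name check below are assumptions for illustration, not lines from this commit:

	fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		// Evictions surface on the fake client as creates of the
		// pods/eviction subresource; the created object names the pod.
		getAction := action.(core.CreateAction)
		obj := getAction.GetObject()
		if eviction, ok := obj.(*v1beta1.Eviction); ok {
			if _, exists := podsForEviction[eviction.GetName()]; !exists {
				evictionFailed = true // a pod outside the expected set was evicted
			}
		}
		return true, obj, nil
	})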

var nodes []*v1.Node
for _, node := range test.nodes {
nodes = append(nodes, node)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

//fakeClient := &fake.Clientset{}
//fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
//	list := action.(core.ListAction)
//	fieldString := list.GetListRestrictions().Fields.String()
//	if strings.Contains(fieldString, n1NodeName) {
//		return true, test.pods[n1NodeName], nil
//	}
//	if strings.Contains(fieldString, n2NodeName) {
//		return true, test.pods[n2NodeName], nil
//	}
//	if strings.Contains(fieldString, n3NodeName) {
//		return true, test.pods[n3NodeName], nil
//	}
//	return true, nil, fmt.Errorf("Failed to list: %v", list)
//})
//fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
//	getAction := action.(core.GetAction)
//	if node, exists := testCase.nodes[getAction.GetName()]; exists {
//		return true, node, nil
//	}
//	return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
//})

podEvictor := evictions.NewPodEvictor(
fakeClient,
@@ -567,7 +462,7 @@ func TestHighNodeUtilization(t *testing.T) {
false,
nil,
nil,
nodes,
testCase.nodes,
false,
false,
false,
@@ -577,16 +472,16 @@ func TestHighNodeUtilization(t *testing.T) {
Enabled: true,
Params: &api.StrategyParameters{
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
Thresholds: test.thresholds,
Thresholds: testCase.thresholds,
},
NodeFit: true,
},
}
HighNodeUtilization(ctx, fakeClient, strategy, nodes, podEvictor)
HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)

podsEvicted := podEvictor.TotalEvicted()
if test.expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %v pods to be evicted but %v got evicted", test.expectedPodsEvicted, podsEvicted)
if testCase.expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %v pods to be evicted but %v got evicted", testCase.expectedPodsEvicted, podsEvicted)
}
if evictionFailed {
t.Errorf("Pod evictions failed unexpectedly")
@@ -663,7 +558,6 @@ func TestValidateHighNodeUtilizationStrategyConfig(t *testing.T) {
}

func TestHighNodeUtilizationWithTaints(t *testing.T) {
ctx := context.Background()
strategy := api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
@@ -741,6 +635,9 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {

for _, item := range tests {
t.Run(item.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

var objs []runtime.Object
for _, node := range item.nodes {
objs = append(objs, node)
@@ -751,6 +648,16 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
}

fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

podEvictor := evictions.NewPodEvictor(
fakeClient,
@@ -764,7 +671,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
false,
)

HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor)
HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)

if item.evictionsExpected != podEvictor.TotalEvicted() {
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())

File diff suppressed because it is too large
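Every strategy entry point touched by this commit gains the same trailing parameter, which is why each test above now threads getPodsAssignedToNode through. As an illustrative sketch only (the real signatures live in each strategy's own file, and the largest of those diffs is suppressed above; the clientset import alias is an assumption):

	// Illustrative signature: strategies now receive the informer-backed
	// lookup alongside the clientset instead of listing pods themselves.
	func HighNodeUtilization(
		ctx context.Context,
		client clientset.Interface,
		strategy api.DeschedulerStrategy,
		nodes []*v1.Node,
		podEvictor *evictions.PodEvictor,
		getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
	) {
		// ...
	}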
@@ -24,16 +24,17 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)

func TestPodAntiAffinity(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
@@ -106,55 +107,55 @@ func TestPodAntiAffinity(t *testing.T) {
description string
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
pods []v1.Pod
pods []*v1.Pod
expectedEvictedPodCount uint
nodeFit bool
nodes []*v1.Node
}{
{
description: "Maximum pods to evict - 0",
pods: []v1.Pod{*p1, *p2, *p3, *p4},
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict - 3",
maxPodsToEvictPerNode: &uint3,
pods: []v1.Pod{*p1, *p2, *p3, *p4},
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict (maxPodsToEvictPerNamespace=3) - 3",
maxNoOfPodsToEvictPerNamespace: &uint3,
pods: []v1.Pod{*p1, *p2, *p3, *p4},
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Evict only 1 pod after sorting",
pods: []v1.Pod{*p5, *p6, *p7},
pods: []*v1.Pod{p5, p6, p7},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p1, *nonEvictablePod},
pods: []*v1.Pod{p1, nonEvictablePod},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p1, *nonEvictablePod},
pods: []*v1.Pod{p1, nonEvictablePod},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Won't evict pods because node selectors don't match available nodes",
maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p8, *nonEvictablePod},
pods: []*v1.Pod{p8, nonEvictablePod},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
nodeFit: true,
@@ -162,51 +163,68 @@ func TestPodAntiAffinity(t *testing.T) {
{
description: "Won't evict pods because only other node is not schedulable",
maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p8, *nonEvictablePod},
pods: []*v1.Pod{p8, nonEvictablePod},
nodes: []*v1.Node{node1, node3},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "No pod to evicted since all pod terminating",
pods: []v1.Pod{*p9, *p10},
pods: []*v1.Pod{p9, p10},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
},
}

for _, test := range tests {
// create fake client
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: test.pods}, nil
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node1, nil
})
t.Run(test.description, func(t *testing.T) {

podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
test.maxPodsToEvictPerNode,
test.maxNoOfPodsToEvictPerNamespace,
test.nodes,
false,
false,
false,
)
strategy := api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: test.nodeFit,
},
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != test.expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
}
var objs []runtime.Object
for _, node := range test.nodes {
objs = append(objs, node)
}
for _, pod := range test.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)

sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
test.maxPodsToEvictPerNode,
test.maxNoOfPodsToEvictPerNamespace,
test.nodes,
false,
false,
false,
)
strategy := api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: test.nodeFit,
},
}

RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != test.expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
}
})
}
}

@@ -25,15 +25,16 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)

func TestPodLifeTime(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
olderPodCreationTime := metav1.NewTime(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
newerPodCreationTime := metav1.NewTime(time.Now())
@@ -140,7 +141,7 @@ func TestPodLifeTime(t *testing.T) {
testCases := []struct {
description string
strategy api.DeschedulerStrategy
pods []v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedEvictedPodCount uint
ignorePvcPods bool
@@ -153,7 +154,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []v1.Pod{*p1, *p2},
pods: []*v1.Pod{p1, p2},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
@@ -165,7 +166,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []v1.Pod{*p3, *p4},
pods: []*v1.Pod{p3, p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
},
@@ -177,7 +178,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []v1.Pod{*p5, *p6},
pods: []*v1.Pod{p5, p6},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
@@ -189,7 +190,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []v1.Pod{*p7, *p8},
pods: []*v1.Pod{p7, p8},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
},
@@ -204,7 +205,7 @@ func TestPodLifeTime(t *testing.T) {
},
},
},
pods: []v1.Pod{*p9, *p10},
pods: []*v1.Pod{p9, p10},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
@@ -216,7 +217,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []v1.Pod{*p11},
pods: []*v1.Pod{p11},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
ignorePvcPods: true,
@@ -229,7 +230,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []v1.Pod{*p11},
pods: []*v1.Pod{p11},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
@@ -244,7 +245,7 @@ func TestPodLifeTime(t *testing.T) {
},
},
},
pods: []v1.Pod{*p12, *p13},
pods: []*v1.Pod{p12, p13},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
@@ -259,35 +260,54 @@ func TestPodLifeTime(t *testing.T) {
},
},
},
pods: []v1.Pod{*p14, *p15},
pods: []*v1.Pod{p14, p15},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
},
}

for _, tc := range testCases {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)

sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
tc.nodes,
false,
false,
tc.ignorePvcPods,
)

PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
}
})

podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
tc.nodes,
false,
false,
tc.ignorePvcPods,
)

PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
}
}

}

@@ -18,23 +18,24 @@ package strategies

import (
"context"
"testing"

"fmt"
"testing"

v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)

func initPods(node *v1.Node) []v1.Pod {
pods := make([]v1.Pod, 0)
func initPods(node *v1.Node) []*v1.Pod {
pods := make([]*v1.Pod, 0)

for i := int32(0); i <= 9; i++ {
pod := test.BuildTestPod(fmt.Sprintf("pod-%d", i), 100, 0, node.Name, nil)
@@ -56,7 +57,7 @@ func initPods(node *v1.Node) []v1.Pod {
},
},
}
pods = append(pods, *pod)
pods = append(pods, pod)
}

// The following 3 pods won't get evicted.
@@ -81,8 +82,6 @@ func initPods(node *v1.Node) []v1.Pod {
}

func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
ctx := context.Background()

node1 := test.BuildTestNode("node1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("node2", 2000, 3000, 10, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
@@ -203,29 +202,48 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
}

for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {

fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: pods}, nil
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)

sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
false,
false,
)

RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
})

podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
false,
false,
)

RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
}

}

@@ -8,15 +8,16 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)

func TestTopologySpreadConstraint(t *testing.T) {
ctx := context.Background()
testCases := []struct {
name string
pods []*v1.Pod
@@ -870,17 +871,29 @@ func TestTopologySpreadConstraint(t *testing.T) {

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
podList := make([]v1.Pod, 0, len(tc.pods))
for _, pod := range tc.pods {
podList = append(podList, *pod)
}
return true, &v1.PodList{Items: podList}, nil
})
fakeClient.Fake.AddReactor("list", "namespaces", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.NamespaceList{Items: []v1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: "ns1", Namespace: "ns1"}}}}, nil
})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
objs = append(objs, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns1"}})
fakeClient := fake.NewSimpleClientset(objs...)

sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

podEvictor := evictions.NewPodEvictor(
fakeClient,
@@ -893,7 +906,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
false,
false,
)
RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)

@@ -39,7 +39,7 @@ import (
func TestRemoveDuplicates(t *testing.T) {
ctx := context.Background()

clientSet, _, stopCh := initializeClient(t)
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)

nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -164,6 +164,7 @@ func TestRemoveDuplicates(t *testing.T) {
},
workerNodes,
podEvictor,
getPodsAssignedToNode,
)

waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)

@@ -21,7 +21,7 @@ var oneHourPodLifetimeSeconds uint = 3600

func TestFailedPods(t *testing.T) {
ctx := context.Background()
clientSet, _, stopCh := initializeClient(t)
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
@@ -95,6 +95,7 @@ func TestFailedPods(t *testing.T) {
},
nodes,
podEvictor,
getPodsAssignedToNode,
)
t.Logf("Finished RemoveFailedPods strategy for %s", name)

@@ -108,7 +108,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
}
}

func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, chan struct{}) {
func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, podutil.GetPodsAssignedToNodeFunc, chan struct{}) {
clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
if err != nil {
t.Errorf("Error during client creation with %v", err)
@@ -117,12 +117,18 @@ func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInfo
stopChannel := make(chan struct{})

sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
podInformer := sharedInformerFactory.Core().V1().Pods()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)

nodeInformer := sharedInformerFactory.Core().V1().Nodes()

return clientSet, nodeInformer, stopChannel
return clientSet, nodeInformer, getPodsAssignedToNode, stopChannel
}
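Note the reorder inside initializeClient: nodeInformer is now requested from the factory before Start, where the old code created it after. With client-go's shared factories, Start only launches informers that are already registered at that point, so the old ordering relied on a second Start call that never came. A minimal sketch of the safe ordering (variable names mirror the test helper, not a public API):

	factory := informers.NewSharedInformerFactory(clientSet, 0)

	// Register every informer the test needs *before* Start; Start only
	// launches informers that already exist on the factory.
	nodeInformer := factory.Core().V1().Nodes()
	podInformer := factory.Core().V1().Pods()

	factory.Start(stopChannel)            // launches both informers
	factory.WaitForCacheSync(stopChannel) // blocks until both caches are warm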

func runPodLifetimeStrategy(
@@ -135,6 +141,7 @@ func runPodLifetimeStrategy(
priority *int32,
evictCritical bool,
labelSelector *metav1.LabelSelector,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
// Run descheduler.
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
@@ -173,6 +180,7 @@ func runPodLifetimeStrategy(
evictCritical,
false,
),
getPodsAssignedToNode,
)
}

@@ -202,7 +210,7 @@ func intersectStrings(lista, listb []string) []string {
func TestLowNodeUtilization(t *testing.T) {
ctx := context.Background()

clientSet, _, stopCh := initializeClient(t)
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)

nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -293,7 +301,12 @@ func TestLowNodeUtilization(t *testing.T) {
// Run LowNodeUtilization strategy
podEvictor := initPodEvictorOrFail(t, clientSet, nodes)

podsOnMosttUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, workerNodes[0], podutil.WithFilter(podEvictor.Evictable().IsEvictable))
podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
if err != nil {
t.Errorf("Error initializing pod filter function, %v", err)
}

podsOnMosttUtilizedNode, err := podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
|
||||
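
This hunk also captures the ListPodsOnANode API shift: instead of a context, a client, and a WithFilter option backing a live List against the API server, the function now takes a node name, the informer-backed lookup, and a filter prebuilt through NewOptions().WithFilter().BuildFilterFunc(). A minimal sketch of the new pattern; listEvictable and isEvictable are illustrative names standing in for the test's podEvictor.Evictable().IsEvictable.

    // Sketch: the post-change ListPodsOnANode flow.
    package e2esketch

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"

        podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    )

    func listEvictable(nodeName string, getPods podutil.GetPodsAssignedToNodeFunc, isEvictable func(*v1.Pod) bool) ([]*v1.Pod, error) {
        // Compose the filter once; BuildFilterFunc validates the options.
        podFilter, err := podutil.NewOptions().WithFilter(isEvictable).BuildFilterFunc()
        if err != nil {
            return nil, fmt.Errorf("building pod filter: %w", err)
        }
        // The listing now reads from the node-name index in the informer cache
        // rather than issuing a field-selector List to the API server.
        return podutil.ListPodsOnANode(nodeName, getPods, podFilter)
    }
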
@@ -318,11 +331,17 @@ func TestLowNodeUtilization(t *testing.T) {
},
workerNodes,
podEvictor,
getPodsAssignedToNode,
)

waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)

podsOnMosttUtilizedNode, err = podutil.ListPodsOnANode(ctx, clientSet, workerNodes[0], podutil.WithFilter(podEvictor.Evictable().IsEvictable))
podFilter, err = podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
if err != nil {
t.Errorf("Error initializing pod filter function, %v", err)
}

podsOnMosttUtilizedNode, err = podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
@@ -339,7 +358,7 @@ func TestLowNodeUtilization(t *testing.T) {
func TestNamespaceConstraintsInclude(t *testing.T) {
ctx := context.Background()

clientSet, nodeInformer, stopCh := initializeClient(t)
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)

testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -374,7 +393,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) {
t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
Include: []string{rc.Namespace},
}, "", nil, false, nil)
}, "", nil, false, nil, getPodsAssignedToNode)

// All pods are supposed to be deleted, wait until all the old pods are deleted
if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
@@ -410,7 +429,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) {
func TestNamespaceConstraintsExclude(t *testing.T) {
ctx := context.Background()

clientSet, nodeInformer, stopCh := initializeClient(t)
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)

testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -445,7 +464,7 @@ func TestNamespaceConstraintsExclude(t *testing.T) {
t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
Exclude: []string{rc.Namespace},
}, "", nil, false, nil)
}, "", nil, false, nil, getPodsAssignedToNode)

t.Logf("Waiting 10s")
time.Sleep(10 * time.Second)
@@ -477,7 +496,7 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
var lowPriority = int32(500)
ctx := context.Background()

clientSet, nodeInformer, stopCh := initializeClient(t)
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)

testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -558,9 +577,9 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
t.Logf("Existing pods: %v", initialPodNames)

if isPriorityClass {
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil, getPodsAssignedToNode)
} else {
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil, getPodsAssignedToNode)
}

// All pods are supposed to be deleted, wait until all pods in the test namespace are terminating
@@ -607,7 +626,7 @@ func testPriority(t *testing.T, isPriorityClass bool) {
var lowPriority = int32(500)
ctx := context.Background()

clientSet, nodeInformer, stopCh := initializeClient(t)
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)

testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -677,10 +696,10 @@ func testPriority(t *testing.T, isPriorityClass bool) {

if isPriorityClass {
t.Logf("set the strategy to delete pods with priority lower than priority class %s", highPriorityClass.Name)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil, getPodsAssignedToNode)
} else {
t.Logf("set the strategy to delete pods with priority lower than %d", highPriority)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil, getPodsAssignedToNode)
}

t.Logf("Waiting 10s")
@@ -736,7 +755,7 @@ func testPriority(t *testing.T, isPriorityClass bool) {
func TestPodLabelSelector(t *testing.T) {
ctx := context.Background()

clientSet, nodeInformer, stopCh := initializeClient(t)
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)

testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -784,7 +803,7 @@ func TestPodLabelSelector(t *testing.T) {
t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames)

t.Logf("set the strategy to delete pods with label test:podlifetime-evict")
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}})
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode)

t.Logf("Waiting 10s")
time.Sleep(10 * time.Second)
@@ -839,7 +858,7 @@ func TestPodLabelSelector(t *testing.T) {
func TestEvictAnnotation(t *testing.T) {
ctx := context.Background()

clientSet, nodeInformer, stopCh := initializeClient(t)
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)

nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -894,7 +913,7 @@ func TestEvictAnnotation(t *testing.T) {
t.Logf("Existing pods: %v", initialPodNames)

t.Log("Running PodLifetime strategy")
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil, getPodsAssignedToNode)

if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})

@@ -37,7 +37,7 @@ import (
func TestTooManyRestarts(t *testing.T) {
ctx := context.Background()

clientSet, _, stopCh := initializeClient(t)
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)

nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -157,6 +157,7 @@ func TestTooManyRestarts(t *testing.T) {
},
workerNodes,
podEvictor,
getPodsAssignedToNode,
)

waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)

@@ -18,7 +18,7 @@ const zoneTopologyKey string = "topology.kubernetes.io/zone"

func TestTopologySpreadConstraint(t *testing.T) {
ctx := context.Background()
clientSet, _, stopCh := initializeClient(t)
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
@@ -92,6 +92,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
nodes,
podEvictor,
getPodsAssignedToNode,
)
t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)