mirror of https://github.com/kubernetes-sigs/descheduler.git
Merge pull request #322 from lixiang233/move_pod_sort
Move sortPodsBasedOnPriority to pod util

@@ -238,7 +238,8 @@ When the descheduler decides to evict pods from a node, it employs the following
 never evicted because these pods won't be recreated.
 * Pods associated with DaemonSets are never evicted.
 * Pods with local storage are never evicted.
-* Best efforts pods are evicted before burstable and guaranteed pods.
+* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
+best effort pods are evicted before burstable and guaranteed pods.
 * All types of pods with the annotation descheduler.alpha.kubernetes.io/evict are evicted. This
 annotation is used to override checks which prevent eviction and users can select which pod is evicted.
 Users should know how and if the pod will be recreated.
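The ordering described above is implemented by the `SortPodsBasedOnPriorityLowToHigh` helper introduced in this PR (see the hunks below). A minimal sketch of how a strategy consumes it; the package path is assumed from the `podutil` alias used in the strategy code in this diff:

```go
import (
	v1 "k8s.io/api/core/v1"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// evictionOrder arranges candidates the way the two strategies walk them:
// lowest priority first; within equal priority, BestEffort before
// Burstable, and Burstable before Guaranteed.
func evictionOrder(candidates []*v1.Pod) []*v1.Pod {
	podutil.SortPodsBasedOnPriorityLowToHigh(candidates)
	return candidates
}
```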

@@ -23,6 +23,7 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	clientset "k8s.io/client-go/kubernetes"
 	"sigs.k8s.io/descheduler/pkg/utils"
+	"sort"
 )

 // ListPodsOnANode lists all of the pods on a node
@@ -68,3 +69,27 @@ func IsBurstablePod(pod *v1.Pod) bool {
 func IsGuaranteedPod(pod *v1.Pod) bool {
 	return utils.GetPodQOS(pod) == v1.PodQOSGuaranteed
 }
+
+// SortPodsBasedOnPriorityLowToHigh sorts pods based on their priorities from low to high.
+// If pods have same priorities, they will be sorted by QoS in the following order:
+// BestEffort, Burstable, Guaranteed
+func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
+	sort.Slice(pods, func(i, j int) bool {
+		if pods[i].Spec.Priority == nil && pods[j].Spec.Priority != nil {
+			return true
+		}
+		if pods[j].Spec.Priority == nil && pods[i].Spec.Priority != nil {
+			return false
+		}
+		if (pods[j].Spec.Priority == nil && pods[i].Spec.Priority == nil) || (*pods[i].Spec.Priority == *pods[j].Spec.Priority) {
+			if IsBestEffortPod(pods[i]) {
+				return true
+			}
+			if IsBurstablePod(pods[i]) && IsGuaranteedPod(pods[j]) {
+				return true
+			}
+			return false
+		}
+		return *pods[i].Spec.Priority < *pods[j].Spec.Priority
+	})
+}
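One behavior of the comparator worth calling out: a nil `Spec.Priority` sorts before any set value, so pods without a priority become the first eviction candidates. A self-contained illustration of just the nil-handling rule (plain `*int32` values stand in for pods; this is an illustration, not code from the PR):

```go
package main

import (
	"fmt"
	"sort"
)

// lessNilFirst mirrors the nil handling of the comparator above: nil
// sorts before any concrete value; equal values would fall through to
// the QoS tie-break, which is omitted here.
func lessNilFirst(a, b *int32) bool {
	if a == nil && b != nil {
		return true
	}
	if b == nil && a != nil {
		return false
	}
	if a == nil && b == nil {
		return false
	}
	return *a < *b
}

func main() {
	high, low := int32(10000), int32(0)
	prios := []*int32{&high, nil, &low}
	sort.Slice(prios, func(i, j int) bool { return lessNilFirst(prios[i], prios[j]) })
	for _, p := range prios {
		if p == nil {
			fmt.Print("nil ")
		} else {
			fmt.Print(*p, " ")
		}
	}
	fmt.Println() // prints: nil 0 10000, so nil-priority pods sort first
}
```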

@@ -19,6 +19,7 @@ package pod
 import (
 	"context"
 	"fmt"
+	"reflect"
 	"strings"
 	"testing"

@@ -29,6 +30,11 @@ import (
 	"sigs.k8s.io/descheduler/test"
 )

+var (
+	lowPriority  = int32(0)
+	highPriority = int32(10000)
+)
+
 func TestListPodsOnANode(t *testing.T) {
 	testCases := []struct {
 		name string
@@ -67,3 +73,43 @@ func TestListPodsOnANode(t *testing.T) {
 		}
 	}
 }
+
+func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
+	n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
+
+	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
+		test.SetPodPriority(pod, lowPriority)
+	})
+
+	// BestEffort
+	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
+		test.SetPodPriority(pod, highPriority)
+		test.MakeBestEffortPod(pod)
+	})
+
+	// Burstable
+	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
+		test.SetPodPriority(pod, highPriority)
+		test.MakeBurstablePod(pod)
+	})
+
+	// Guaranteed
+	p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
+		test.SetPodPriority(pod, highPriority)
+		test.MakeGuaranteedPod(pod)
+	})
+
+	// Best effort with nil priorities.
+	p5 := test.BuildTestPod("p5", 400, 100, n1.Name, test.MakeBestEffortPod)
+	p5.Spec.Priority = nil
+
+	p6 := test.BuildTestPod("p6", 400, 100, n1.Name, test.MakeGuaranteedPod)
+	p6.Spec.Priority = nil
+
+	podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
+
+	SortPodsBasedOnPriorityLowToHigh(podList)
+	if !reflect.DeepEqual(podList[len(podList)-1], p4) {
+		t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
+	}
+}
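The test pins only the last element of the sorted slice. Given the fixture, the full expected order is p5, p6, p1, p2, p3, p4: the two nil-priority pods first (BestEffort p5 before Guaranteed p6), then ascending priority with the QoS tie-break. A stricter assertion along these lines would slot into the test above; it is a hypothetical extension, not part of the PR:

```go
// Hypothetical stricter check (continuation of the test above, not in
// the PR): assert the entire expected order, not just the last pod.
want := []*v1.Pod{p5, p6, p1, p2, p3, p4}
for i := range want {
	if podList[i].Name != want[i].Name {
		t.Errorf("position %d: expected %s, got %s", i, want[i].Name, podList[i].Name)
	}
}
```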

@@ -247,7 +247,7 @@ func evictPodsFromTargetNodes(
 			evictablePods = append(append(burstablePods, bestEffortPods...), guaranteedPods...)

 			// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
-			sortPodsBasedOnPriority(evictablePods)
+			podutil.SortPodsBasedOnPriorityLowToHigh(evictablePods)
 			evictPods(ctx, evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
 		} else {
 			// TODO: Remove this when we support only priority.
@@ -338,28 +338,6 @@ func sortNodesByUsage(nodes []NodeUsageMap) {
 	})
 }

-// sortPodsBasedOnPriority sorts pods based on priority and if their priorities are equal, they are sorted based on QoS tiers.
-func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
-	sort.Slice(evictablePods, func(i, j int) bool {
-		if evictablePods[i].Spec.Priority == nil && evictablePods[j].Spec.Priority != nil {
-			return true
-		}
-		if evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority != nil {
-			return false
-		}
-		if (evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority == nil) || (*evictablePods[i].Spec.Priority == *evictablePods[j].Spec.Priority) {
-			if podutil.IsBestEffortPod(evictablePods[i]) {
-				return true
-			}
-			if podutil.IsBurstablePod(evictablePods[i]) && podutil.IsGuaranteedPod(evictablePods[j]) {
-				return true
-			}
-			return false
-		}
-		return *evictablePods[i].Spec.Priority < *evictablePods[j].Spec.Priority
-	})
-}
-
 // createNodePodsMap returns nodepodsmap with evictable pods on node.
 func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {
 	npm := NodePodsMap{}

@@ -22,15 +22,13 @@ import (
 	"strings"
 	"testing"

-	"reflect"
-
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/api/policy/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
@@ -45,30 +43,6 @@ var (
 	highPriority = int32(10000)
 )

-func setRSOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList() }
-func setDSOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList() }
-func setNormalOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() }
-func setHighPriority(pod *v1.Pod) { pod.Spec.Priority = &highPriority }
-func setLowPriority(pod *v1.Pod) { pod.Spec.Priority = &lowPriority }
-func setNodeUnschedulable(node *v1.Node) { node.Spec.Unschedulable = true }
-
-func makeBestEffortPod(pod *v1.Pod) {
-	pod.Spec.Containers[0].Resources.Requests = nil
-	pod.Spec.Containers[0].Resources.Requests = nil
-	pod.Spec.Containers[0].Resources.Limits = nil
-	pod.Spec.Containers[0].Resources.Limits = nil
-}
-
-func makeBurstablePod(pod *v1.Pod) {
-	pod.Spec.Containers[0].Resources.Limits = nil
-	pod.Spec.Containers[0].Resources.Limits = nil
-}
-
-func makeGuaranteedPod(pod *v1.Pod) {
-	pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
-	pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]
-}
-
 func TestLowNodeUtilization(t *testing.T) {
 	ctx := context.Background()
 	n1NodeName := "n1"
@@ -99,19 +73,19 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes: map[string]*v1.Node{
 				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
 				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
 			pods: map[string]*v1.PodList{
 				n1NodeName: {
 					Items: []v1.Pod{
 						// These won't be evicted.
-						*test.BuildTestPod("p1", 400, 0, n1NodeName, setDSOwnerRef),
-						*test.BuildTestPod("p2", 400, 0, n1NodeName, setDSOwnerRef),
-						*test.BuildTestPod("p3", 400, 0, n1NodeName, setDSOwnerRef),
-						*test.BuildTestPod("p4", 400, 0, n1NodeName, setDSOwnerRef),
+						*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetDSOwnerRef),
+						*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetDSOwnerRef),
+						*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetDSOwnerRef),
+						*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
 						*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
 							// A pod with local storage.
-							setNormalOwnerRef(pod)
+							test.SetNormalOwnerRef(pod)
 							pod.Spec.Volumes = []v1.Volume{
 								{
 									Name: "sample",
@@ -135,7 +109,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				},
 				n2NodeName: {
 					Items: []v1.Pod{
-						*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
 					},
 				},
 				n3NodeName: {},
@@ -155,21 +129,21 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes: map[string]*v1.Node{
 				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
 				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
 			pods: map[string]*v1.PodList{
 				n1NodeName: {
 					Items: []v1.Pod{
-						*test.BuildTestPod("p1", 400, 0, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p2", 400, 0, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p3", 400, 0, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p4", 400, 0, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p5", 400, 0, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
 						// These won't be evicted.
-						*test.BuildTestPod("p6", 400, 0, n1NodeName, setDSOwnerRef),
+						*test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
 						*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
 							// A pod with local storage.
-							setNormalOwnerRef(pod)
+							test.SetNormalOwnerRef(pod)
 							pod.Spec.Volumes = []v1.Volume{
 								{
 									Name: "sample",
@@ -193,7 +167,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				},
 				n2NodeName: {
 					Items: []v1.Pod{
-						*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
 					},
 				},
 				n3NodeName: {},
@@ -213,21 +187,21 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes: map[string]*v1.Node{
 				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
 				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
 			pods: map[string]*v1.PodList{
 				n1NodeName: {
 					Items: []v1.Pod{
-						*test.BuildTestPod("p1", 400, 300, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p2", 400, 300, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p3", 400, 300, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p4", 400, 300, n1NodeName, setRSOwnerRef),
-						*test.BuildTestPod("p5", 400, 300, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p1", 400, 300, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p2", 400, 300, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p3", 400, 300, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p4", 400, 300, n1NodeName, test.SetRSOwnerRef),
+						*test.BuildTestPod("p5", 400, 300, n1NodeName, test.SetRSOwnerRef),
 						// These won't be evicted.
-						*test.BuildTestPod("p6", 400, 300, n1NodeName, setDSOwnerRef),
+						*test.BuildTestPod("p6", 400, 300, n1NodeName, test.SetDSOwnerRef),
 						*test.BuildTestPod("p7", 400, 300, n1NodeName, func(pod *v1.Pod) {
 							// A pod with local storage.
-							setNormalOwnerRef(pod)
+							test.SetNormalOwnerRef(pod)
 							pod.Spec.Volumes = []v1.Volume{
 								{
 									Name: "sample",
@@ -251,7 +225,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				},
 				n2NodeName: {
 					Items: []v1.Pod{
-						*test.BuildTestPod("p9", 400, 2100, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p9", 400, 2100, n1NodeName, test.SetRSOwnerRef),
 					},
 				},
 				n3NodeName: {},
@@ -272,40 +246,40 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes: map[string]*v1.Node{
 				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
 				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
 			pods: map[string]*v1.PodList{
 				n1NodeName: {
 					Items: []v1.Pod{
 						*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							setHighPriority(pod)
+							test.SetRSOwnerRef(pod)
+							test.SetPodPriority(pod, highPriority)
 						}),
 						*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							setHighPriority(pod)
+							test.SetRSOwnerRef(pod)
+							test.SetPodPriority(pod, highPriority)
 						}),
 						*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							setHighPriority(pod)
+							test.SetRSOwnerRef(pod)
+							test.SetPodPriority(pod, highPriority)
 						}),
 						*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							setHighPriority(pod)
+							test.SetRSOwnerRef(pod)
+							test.SetPodPriority(pod, highPriority)
 						}),
 						*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							setLowPriority(pod)
+							test.SetRSOwnerRef(pod)
+							test.SetPodPriority(pod, lowPriority)
 						}),
 						// These won't be evicted.
 						*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setDSOwnerRef(pod)
-							setHighPriority(pod)
+							test.SetDSOwnerRef(pod)
+							test.SetPodPriority(pod, highPriority)
 						}),
 						*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
 							// A pod with local storage.
-							setNormalOwnerRef(pod)
-							setLowPriority(pod)
+							test.SetNormalOwnerRef(pod)
+							test.SetPodPriority(pod, lowPriority)
 							pod.Spec.Volumes = []v1.Volume{
 								{
 									Name: "sample",
@@ -329,7 +303,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				},
 				n2NodeName: {
 					Items: []v1.Pod{
-						*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
 					},
 				},
 				n3NodeName: {},
@@ -349,38 +323,38 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes: map[string]*v1.Node{
 				n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
 				n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+				n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 			},
 			// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
 			pods: map[string]*v1.PodList{
 				n1NodeName: {
 					Items: []v1.Pod{
 						*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							makeBestEffortPod(pod)
+							test.SetRSOwnerRef(pod)
+							test.MakeBestEffortPod(pod)
 						}),
 						*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							makeBestEffortPod(pod)
+							test.SetRSOwnerRef(pod)
+							test.MakeBestEffortPod(pod)
 						}),
 						*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
+							test.SetRSOwnerRef(pod)
 						}),
 						*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							makeBestEffortPod(pod)
+							test.SetRSOwnerRef(pod)
+							test.MakeBestEffortPod(pod)
 						}),
 						*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setRSOwnerRef(pod)
-							makeBestEffortPod(pod)
+							test.SetRSOwnerRef(pod)
+							test.MakeBestEffortPod(pod)
 						}),
 						// These won't be evicted.
 						*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
-							setDSOwnerRef(pod)
+							test.SetDSOwnerRef(pod)
 						}),
 						*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
 							// A pod with local storage.
-							setNormalOwnerRef(pod)
+							test.SetNormalOwnerRef(pod)
 							pod.Spec.Volumes = []v1.Volume{
 								{
 									Name: "sample",
@@ -404,7 +378,7 @@ func TestLowNodeUtilization(t *testing.T) {
 				},
 				n2NodeName: {
 					Items: []v1.Pod{
-						*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+						*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
 					},
 				},
 				n3NodeName: {},
@@ -495,44 +469,6 @@ func TestLowNodeUtilization(t *testing.T) {
 		}
 	}

-func TestSortPodsByPriority(t *testing.T) {
-	n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
-
-	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, setLowPriority)
-
-	// BestEffort
-	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
-		setHighPriority(pod)
-		makeBestEffortPod(pod)
-	})
-
-	// Burstable
-	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
-		setHighPriority(pod)
-		makeBurstablePod(pod)
-	})
-
-	// Guaranteed
-	p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
-		setHighPriority(pod)
-		makeGuaranteedPod(pod)
-	})
-
-	// Best effort with nil priorities.
-	p5 := test.BuildTestPod("p5", 400, 100, n1.Name, makeBestEffortPod)
-	p5.Spec.Priority = nil
-
-	p6 := test.BuildTestPod("p6", 400, 100, n1.Name, makeGuaranteedPod)
-	p6.Spec.Priority = nil
-
-	podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
-
-	sortPodsBasedOnPriority(podList)
-	if !reflect.DeepEqual(podList[len(podList)-1], p4) {
-		t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
-	}
-}
-
 func TestValidateStrategyConfig(t *testing.T) {
 	tests := []struct {
 		name string
@@ -783,7 +719,7 @@ func TestWithTaints(t *testing.T) {
 		},
 	}

-	podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, setRSOwnerRef)
+	podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, test.SetRSOwnerRef)
 	podThatToleratesTaint.Spec.Tolerations = []v1.Toleration{
 		{
 			Key: "key",
@@ -802,16 +738,16 @@ func TestWithTaints(t *testing.T) {
 			nodes: []*v1.Node{n1, n2, n3},
 			pods: []*v1.Pod{
 				//Node 1 pods
-				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
 				// Node 2 pods
-				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, test.SetRSOwnerRef),
 			},
 			evictionsExpected: 1,
 		},
@@ -820,16 +756,16 @@ func TestWithTaints(t *testing.T) {
 			nodes: []*v1.Node{n1, n3withTaints},
 			pods: []*v1.Pod{
 				//Node 1 pods
-				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
 				// Node 3 pods
-				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
 			},
 			evictionsExpected: 0,
 		},
@@ -838,16 +774,16 @@ func TestWithTaints(t *testing.T) {
 			nodes: []*v1.Node{n1, n3withTaints},
 			pods: []*v1.Pod{
 				//Node 1 pods
-				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
 				podThatToleratesTaint,
 				// Node 3 pods
-				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
+				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
 			},
 			evictionsExpected: 1,
 		},

@@ -37,6 +37,8 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 	if err != nil {
 		return
 	}
+	// sort the evictable Pods based on priority, if there are multiple pods with same priority, they are sorted based on QoS tiers.
+	podutil.SortPodsBasedOnPriorityLowToHigh(pods)
 	totalPods := len(pods)
 	for i := 0; i < totalPods; i++ {
 		if checkPodsWithAntiAffinityExist(pods[i], pods) {
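Sorting before the eviction loop matters when the strategy cannot evict every violator (for example when `maxPodsToEvict` caps evictions per node): the budget is now spent on the lowest-priority offenders first. A simplified sketch of the resulting control flow; `evict` and `limit` are hypothetical stand-ins for the real `podEvictor` accounting:

```go
// Simplified view of the loop after this change (not the verbatim
// strategy code). checkPodsWithAntiAffinityExist is the helper from
// this file; evict and limit are hypothetical stand-ins.
func evictLowestPriorityViolators(pods []*v1.Pod, limit int, evict func(*v1.Pod)) {
	podutil.SortPodsBasedOnPriorityLowToHigh(pods)
	evicted := 0
	for i := 0; i < len(pods); i++ {
		if limit > 0 && evicted >= limit {
			return
		}
		if checkPodsWithAntiAffinityExist(pods[i], pods) {
			evict(pods[i]) // lowest-priority violator goes first
			// Dropping the evicted pod may resolve the conflict for the
			// remaining, higher-priority pods.
			pods = append(pods[:i], pods[i+1:]...)
			i--
			evicted++
		}
	}
}
```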

@@ -37,16 +37,33 @@ func TestPodAntiAffinity(t *testing.T) {
 	p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
 	p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
 	p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
+	p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
+	p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
+	p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
 	p2.Labels = map[string]string{"foo": "bar"}
-	p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+	p5.Labels = map[string]string{"foo": "bar"}
+	p6.Labels = map[string]string{"foo": "bar"}
+	p7.Labels = map[string]string{"foo1": "bar1"}
+	test.SetNormalOwnerRef(p1)
+	test.SetNormalOwnerRef(p2)
+	test.SetNormalOwnerRef(p3)
+	test.SetNormalOwnerRef(p4)
+	test.SetNormalOwnerRef(p5)
+	test.SetNormalOwnerRef(p6)
+	test.SetNormalOwnerRef(p7)

 	// set pod anti affinity
-	setPodAntiAffinity(p1)
-	setPodAntiAffinity(p3)
-	setPodAntiAffinity(p4)
+	setPodAntiAffinity(p1, "foo", "bar")
+	setPodAntiAffinity(p3, "foo", "bar")
+	setPodAntiAffinity(p4, "foo", "bar")
+	setPodAntiAffinity(p5, "foo1", "bar1")
+	setPodAntiAffinity(p6, "foo1", "bar1")
+	setPodAntiAffinity(p7, "foo", "bar")
+
+	// set pod priority
+	test.SetPodPriority(p5, 100)
+	test.SetPodPriority(p6, 50)
+	test.SetPodPriority(p7, 0)

 	tests := []struct {
 		description string
@@ -66,13 +83,19 @@ func TestPodAntiAffinity(t *testing.T) {
 			pods: []v1.Pod{*p1, *p2, *p3, *p4},
 			expectedEvictedPodCount: 3,
 		},
+		{
+			description: "Evict only 1 pod after sorting",
+			maxPodsToEvict: 0,
+			pods: []v1.Pod{*p5, *p6, *p7},
+			expectedEvictedPodCount: 1,
+		},
 	}

 	for _, test := range tests {
 		// create fake client
 		fakeClient := &fake.Clientset{}
 		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-			return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil
+			return true, &v1.PodList{Items: test.pods}, nil
 		})
 		fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
 			return true, node, nil
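Why the new case expects exactly one eviction (a reading of the fixture, not spelled out in the diff): p5 (priority 100) and p6 (priority 50) carry anti-affinity against `foo1: bar1`, which are p7's labels, while p7 (priority 0) carries anti-affinity against `foo: bar`, the labels on p5 and p6. After the low-to-high sort the strategy reaches p7 first and evicts it, and once p7 is gone the remaining pods no longer have a conflicting peer on the node, so no further evictions occur.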
@@ -95,7 +118,7 @@ func TestPodAntiAffinity(t *testing.T) {
 		}
 	}

-func setPodAntiAffinity(inputPod *v1.Pod) {
+func setPodAntiAffinity(inputPod *v1.Pod, labelKey, labelValue string) {
 	inputPod.Spec.Affinity = &v1.Affinity{
 		PodAntiAffinity: &v1.PodAntiAffinity{
 			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -103,9 +126,9 @@ func setPodAntiAffinity(inputPod *v1.Pod) {
 				LabelSelector: &metav1.LabelSelector{
 					MatchExpressions: []metav1.LabelSelectorRequirement{
 						{
-							Key: "foo",
+							Key: labelKey,
 							Operator: metav1.LabelSelectorOpIn,
-							Values: []string{"bar"},
+							Values: []string{labelValue},
 						},
 					},
 				},

@@ -116,3 +116,48 @@ func BuildTestNode(name string, millicpu int64, mem int64, pods int64, apply fun
 	}
 	return node
 }
+
+// MakeBestEffortPod makes the given pod a BestEffort pod
+func MakeBestEffortPod(pod *v1.Pod) {
+	pod.Spec.Containers[0].Resources.Requests = nil
+	pod.Spec.Containers[0].Resources.Requests = nil
+	pod.Spec.Containers[0].Resources.Limits = nil
+	pod.Spec.Containers[0].Resources.Limits = nil
+}
+
+// MakeBurstablePod makes the given pod a Burstable pod
+func MakeBurstablePod(pod *v1.Pod) {
+	pod.Spec.Containers[0].Resources.Limits = nil
+	pod.Spec.Containers[0].Resources.Limits = nil
+}
+
+// MakeGuaranteedPod makes the given pod an Guaranteed pod
+func MakeGuaranteedPod(pod *v1.Pod) {
+	pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
+	pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]
+}
+
+// SetRSOwnerRef sets the given pod's owner to ReplicaSet
+func SetRSOwnerRef(pod *v1.Pod) {
+	pod.ObjectMeta.OwnerReferences = GetReplicaSetOwnerRefList()
+}
+
+// SetDSOwnerRef sets the given pod's owner to DaemonSet
+func SetDSOwnerRef(pod *v1.Pod) {
+	pod.ObjectMeta.OwnerReferences = GetDaemonSetOwnerRefList()
+}
+
+// SetNormalOwnerRef sets the given pod's owner to Pod
+func SetNormalOwnerRef(pod *v1.Pod) {
+	pod.ObjectMeta.OwnerReferences = GetNormalPodOwnerRefList()
+}
+
+// SetPodPriority sets the given pod's priority
+func SetPodPriority(pod *v1.Pod, priority int32) {
+	pod.Spec.Priority = &priority
+}
+
+// SetNodeUnschedulable sets the given node unschedulable
+func SetNodeUnschedulable(node *v1.Node) {
+	node.Spec.Unschedulable = true
+}
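Taken together, the newly exported helpers compose in table-driven tests across packages. A small usage sketch, assuming the `sigs.k8s.io/descheduler/test` import used elsewhere in this diff; `buildVictim` is a hypothetical helper for illustration:

```go
import (
	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/descheduler/test"
)

// buildVictim builds a low-priority BestEffort pod owned by a
// ReplicaSet: the kind of pod the descheduler strategies will now
// pick first for eviction. (Hypothetical helper, not part of the PR.)
func buildVictim(nodeName string) *v1.Pod {
	return test.BuildTestPod("victim", 100, 0, nodeName, func(pod *v1.Pod) {
		test.SetRSOwnerRef(pod)
		test.MakeBestEffortPod(pod)
		test.SetPodPriority(pod, 0)
	})
}
```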