mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)
Merge pull request #262 from ingvagabund/order-pods-by-priority-only-over-over-utilized-nodes
Order pods by priority only over over-utilized nodes
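Since the commit message above is terse, here is a small, self-contained sketch of the ordering the reworked evictPodsFromTargetNodes applies on each over-utilized node (illustrative only, not part of the commit; the pod struct, qosRank field, and evictionOrder function are made-up stand-ins): pods are first split into non-removable and evictable groups per QoS class, then, when the pods carry priorities, the evictable ones are evicted lowest priority first, otherwise the strategy falls back to the old QoS order of best effort, then burstable, then guaranteed.

```go
package main

import (
	"fmt"
	"sort"
)

// pod is a stand-in for *v1.Pod with just the fields the ordering cares about.
type pod struct {
	name     string
	priority *int32
	qosRank  int // 0 = best effort, 1 = burstable, 2 = guaranteed
}

// evictionOrder mimics the decision in evictPodsFromTargetNodes: if the pods
// carry a priority, evict lowest-priority pods first (the real code also
// breaks ties by QoS); otherwise fall back to the old QoS-based order.
func evictionOrder(pods []pod) []string {
	ordered := append([]pod(nil), pods...)
	if len(ordered) > 0 && ordered[0].priority != nil {
		sort.SliceStable(ordered, func(i, j int) bool {
			return *ordered[i].priority < *ordered[j].priority
		})
	} else {
		sort.SliceStable(ordered, func(i, j int) bool {
			return ordered[i].qosRank < ordered[j].qosRank
		})
	}
	names := make([]string, 0, len(ordered))
	for _, p := range ordered {
		names = append(names, p.name)
	}
	return names
}

func main() {
	low, high := int32(0), int32(10000)
	fmt.Println(evictionOrder([]pod{
		{"guaranteed-high", &high, 2},
		{"besteffort-low", &low, 0},
		{"burstable-high", &high, 1},
	}))
	// Output: [besteffort-low guaranteed-high burstable-high]
}
```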
@@ -16,10 +16,10 @@ import (
)

func TestTaintsUpdated(t *testing.T) {
n1 := test.BuildTestNode("n1", 2000, 3000, 10)
n2 := test.BuildTestNode("n2", 2000, 3000, 10)
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)

p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name)
p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
{},
}

@@ -17,17 +17,18 @@ limitations under the License.
package evictions

import (
"testing"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/test"
"testing"
)

func TestEvictPod(t *testing.T) {
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
pod1 := test.BuildTestPod("p1", 400, 0, "node1")
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
tests := []struct {
description string
node *v1.Node
@@ -46,7 +47,7 @@ func TestEvictPod(t *testing.T) {
description: "test pod eviction - pod absent",
node: node1,
pod: pod1,
pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1"), *test.BuildTestPod("p3", 450, 0, "node1")},
pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1", nil), *test.BuildTestPod("p3", 450, 0, "node1", nil)},
want: true,
},
}

@@ -27,14 +27,14 @@ import (
)

func TestReadyNodes(t *testing.T) {
node1 := test.BuildTestNode("node2", 1000, 2000, 9)
node2 := test.BuildTestNode("node3", 1000, 2000, 9)
node1 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
node2 := test.BuildTestNode("node3", 1000, 2000, 9, nil)
node2.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}}
node3 := test.BuildTestNode("node4", 1000, 2000, 9)
node3 := test.BuildTestNode("node4", 1000, 2000, 9, nil)
node3.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}}
node4 := test.BuildTestNode("node5", 1000, 2000, 9)
node4 := test.BuildTestNode("node5", 1000, 2000, 9, nil)
node4.Spec.Unschedulable = true
node5 := test.BuildTestNode("node6", 1000, 2000, 9)
node5 := test.BuildTestNode("node6", 1000, 2000, 9, nil)
node5.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}

if !IsReady(node1) {
@@ -56,9 +56,9 @@ func TestReadyNodes(t *testing.T) {
}

func TestReadyNodesWithNodeSelector(t *testing.T) {
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
node1.Labels = map[string]string{"type": "compute"}
node2 := test.BuildTestNode("node2", 1000, 2000, 9)
node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
node2.Labels = map[string]string{"type": "infra"}

fakeClient := fake.NewSimpleClientset(node1, node2)
@@ -26,7 +26,7 @@ import (
)

func TestIsEvictable(t *testing.T) {
n1 := test.BuildTestNode("node1", 1000, 2000, 13)
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
type testCase struct {
pod *v1.Pod
runBefore func(*v1.Pod)
@@ -36,14 +36,14 @@ func TestIsEvictable(t *testing.T) {

testCases := []testCase{
{
pod: test.BuildTestPod("p1", 400, 0, n1.Name),
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p2", 400, 0, n1.Name),
pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -51,14 +51,14 @@ func TestIsEvictable(t *testing.T) {
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p3", 400, 0, n1.Name),
pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p4", 400, 0, n1.Name),
pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
@@ -66,7 +66,7 @@ func TestIsEvictable(t *testing.T) {
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p5", 400, 0, n1.Name),
pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Volumes = []v1.Volume{
@@ -83,7 +83,7 @@ func TestIsEvictable(t *testing.T) {
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p6", 400, 0, n1.Name),
pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Volumes = []v1.Volume{
@@ -100,7 +100,7 @@ func TestIsEvictable(t *testing.T) {
evictLocalStoragePods: true,
result: true,
}, {
pod: test.BuildTestPod("p7", 400, 0, n1.Name),
pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -118,14 +118,14 @@ func TestIsEvictable(t *testing.T) {
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p8", 400, 0, n1.Name),
pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
},
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p9", 400, 0, n1.Name),
pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
@@ -133,14 +133,14 @@ func TestIsEvictable(t *testing.T) {
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p10", 400, 0, n1.Name),
pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
},
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p11", 400, 0, n1.Name),
pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
@@ -148,7 +148,7 @@ func TestIsEvictable(t *testing.T) {
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p12", 400, 0, n1.Name),
pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
@@ -156,7 +156,7 @@ func TestIsEvictable(t *testing.T) {
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p13", 400, 0, n1.Name),
pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
@@ -179,15 +179,15 @@ func TestIsEvictable(t *testing.T) {
}
}
func TestPodTypes(t *testing.T) {
n1 := test.BuildTestNode("node1", 1000, 2000, 9)
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)

// These won't be evicted.
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
p2 := test.BuildTestPod("p2", 400, 0, n1.Name, nil)
p3 := test.BuildTestPod("p3", 400, 0, n1.Name, nil)
p4 := test.BuildTestPod("p4", 400, 0, n1.Name, nil)
p5 := test.BuildTestPod("p5", 400, 0, n1.Name, nil)
p6 := test.BuildTestPod("p6", 400, 0, n1.Name, nil)

p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -30,23 +30,23 @@ import (

func TestFindDuplicatePods(t *testing.T) {
// first setup pods
node := test.BuildTestNode("n1", 2000, 3000, 10)
p1 := test.BuildTestPod("p1", 100, 0, node.Name)
node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
p1.Namespace = "dev"
p2 := test.BuildTestPod("p2", 100, 0, node.Name)
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
p2.Namespace = "dev"
p3 := test.BuildTestPod("p3", 100, 0, node.Name)
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
p3.Namespace = "dev"
p4 := test.BuildTestPod("p4", 100, 0, node.Name)
p5 := test.BuildTestPod("p5", 100, 0, node.Name)
p6 := test.BuildTestPod("p6", 100, 0, node.Name)
p7 := test.BuildTestPod("p7", 100, 0, node.Name)
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
p7.Namespace = "kube-system"
p8 := test.BuildTestPod("p8", 100, 0, node.Name)
p8 := test.BuildTestPod("p8", 100, 0, node.Name, nil)
p8.Namespace = "test"
p9 := test.BuildTestPod("p9", 100, 0, node.Name)
p9 := test.BuildTestPod("p9", 100, 0, node.Name, nil)
p9.Namespace = "test"
p10 := test.BuildTestPod("p10", 100, 0, node.Name)
p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil)
p10.Namespace = "test"

// ### Evictable Pods ###
@@ -33,13 +33,9 @@ import (
)

type NodeUsageMap struct {
node *v1.Node
usage api.ResourceThresholds
allPods []*v1.Pod
nonRemovablePods []*v1.Pod
bePods []*v1.Pod
bPods []*v1.Pod
gPods []*v1.Pod
node *v1.Node
usage api.ResourceThresholds
allPods []*v1.Pod
}

type NodePodsMap map[*v1.Node][]*v1.Pod
@@ -91,7 +87,16 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))

totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun, ds.MaxNoOfPodsToEvictPerNode, nodepodCount)
totalPodsEvicted := evictPodsFromTargetNodes(
ds.Client,
evictionPolicyGroupVersion,
targetNodes,
lowNodes,
targetThresholds,
ds.DryRun,
ds.MaxNoOfPodsToEvictPerNode,
ds.EvictLocalStoragePods,
nodepodCount)
klog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)

}
@@ -134,9 +139,12 @@ func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds, evictLocalStoragePods bool) ([]NodeUsageMap, []NodeUsageMap) {
lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
for node, pods := range npm {
usage, allPods, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods, evictLocalStoragePods)
nuMap := NodeUsageMap{node, usage, allPods, nonRemovablePods, bePods, bPods, gPods}

usage := nodeUtilization(node, pods, evictLocalStoragePods)
nuMap := NodeUsageMap{
node: node,
usage: usage,
allPods: pods,
}
// Check if node is underutilized and if we can schedule pods on it.
if !nodeutil.IsNodeUnschedulable(node) && IsNodeWithLowUtilization(usage, thresholds) {
klog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
@@ -147,7 +155,6 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
} else {
klog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)
}
klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bePods:%v, bPods:%v, gPods:%v", len(allPods), len(nonRemovablePods), len(bePods), len(bPods), len(gPods))
}
return lowNodes, targetNodes
}
@@ -155,7 +162,17 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
// evictPodsFromTargetNodes evicts pods based on priority, if all the pods on the node have priority, if not
// evicts them based on QoS as fallback option.
// TODO: @ravig Break this function into smaller functions.
func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount utils.NodePodEvictedCount) int {
func evictPodsFromTargetNodes(
client clientset.Interface,
evictionPolicyGroupVersion string,
targetNodes,
lowNodes []NodeUsageMap,
targetThresholds api.ResourceThresholds,
dryRun bool,
maxPodsToEvict int,
evictLocalStoragePods bool,
nodepodCount utils.NodePodEvictedCount,
) int {
podsEvicted := 0

SortNodesByUsage(targetNodes)
@@ -197,11 +214,14 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
klog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
currentPodsEvicted := nodepodCount[node.node]

nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods := classifyPods(node.allPods, evictLocalStoragePods)
klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bestEffortPods:%v, burstablePods:%v, guaranteedPods:%v", len(node.allPods), len(nonRemovablePods), len(bestEffortPods), len(burstablePods), len(guaranteedPods))

// Check if one pod has priority, if yes, assume that all pods have priority and evict pods based on priority.
if node.allPods[0].Spec.Priority != nil {
klog.V(1).Infof("All pods have priority associated with them. Evicting pods based on priority")
evictablePods := make([]*v1.Pod, 0)
evictablePods = append(append(node.bPods, node.bePods...), node.gPods...)
evictablePods = append(append(burstablePods, bestEffortPods...), guaranteedPods...)

// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
sortPodsBasedOnPriority(evictablePods)
@@ -210,13 +230,13 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
// TODO: Remove this when we support only priority.
// Falling back to evicting pods based on priority.
klog.V(1).Infof("Evicting pods based on QoS")
klog.V(1).Infof("There are %v non-evictable pods on the node", len(node.nonRemovablePods))
klog.V(1).Infof("There are %v non-evictable pods on the node", len(nonRemovablePods))
// evict best effort pods
evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict, taintsOfLowNodes)
evictPods(bestEffortPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict, taintsOfLowNodes)
// evict burstable pods
evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict, taintsOfLowNodes)
evictPods(burstablePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict, taintsOfLowNodes)
// evict guaranteed pods
evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict, taintsOfLowNodes)
evictPods(guaranteedPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict, taintsOfLowNodes)
}
nodepodCount[node.node] = currentPodsEvicted
podsEvicted = podsEvicted + nodepodCount[node.node]
@@ -361,38 +381,18 @@ func IsNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds
return true
}

// NodeUtilization returns the current usage of node.
func NodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool) (api.ResourceThresholds, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
bePods := []*v1.Pod{}
nonRemovablePods := []*v1.Pod{}
bPods := []*v1.Pod{}
gPods := []*v1.Pod{}
totalReqs := map[v1.ResourceName]resource.Quantity{}
func nodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool) api.ResourceThresholds {
totalReqs := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: {},
v1.ResourceMemory: {},
}
for _, pod := range pods {
// We need to compute the usage of nonRemovablePods unless it is a best effort pod. So, cannot use podutil.ListEvictablePodsOnNode
if !podutil.IsEvictable(pod, evictLocalStoragePods) {
nonRemovablePods = append(nonRemovablePods, pod)
if podutil.IsBestEffortPod(pod) {
continue
}
} else if podutil.IsBestEffortPod(pod) {
bePods = append(bePods, pod)
continue
} else if podutil.IsBurstablePod(pod) {
bPods = append(bPods, pod)
} else {
gPods = append(gPods, pod)
}

req, _ := utils.PodRequestsAndLimits(pod)
for name, quantity := range req {
if name == v1.ResourceCPU || name == v1.ResourceMemory {
if value, ok := totalReqs[name]; !ok {
totalReqs[name] = quantity.DeepCopy()
} else {
value.Add(quantity)
totalReqs[name] = value
}
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
totalReqs[name].Add(quantity)
}
}
}
@@ -402,12 +402,42 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool)
nodeCapacity = node.Status.Allocatable
}

usage := api.ResourceThresholds{}
totalCPUReq := totalReqs[v1.ResourceCPU]
totalMemReq := totalReqs[v1.ResourceMemory]
totalPods := len(pods)
usage[v1.ResourceCPU] = api.Percentage((float64(totalCPUReq.MilliValue()) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
usage[v1.ResourceMemory] = api.Percentage(float64(totalMemReq.Value()) / float64(nodeCapacity.Memory().Value()) * 100)
usage[v1.ResourcePods] = api.Percentage((float64(totalPods) * 100) / float64(nodeCapacity.Pods().Value()))
return usage, pods, nonRemovablePods, bePods, bPods, gPods
return api.ResourceThresholds{
v1.ResourceCPU: api.Percentage((float64(totalReqs[v1.ResourceCPU].MilliValue()) * 100) / float64(nodeCapacity.Cpu().MilliValue())),
v1.ResourceMemory: api.Percentage(float64(totalReqs[v1.ResourceMemory].Value()) / float64(nodeCapacity.Memory().Value()) * 100),
v1.ResourcePods: api.Percentage((float64(totalPods) * 100) / float64(nodeCapacity.Pods().Value())),
}
}
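As a quick sanity check of the percentage math in nodeUtilization above, with hypothetical numbers that are not taken from the commit:

```go
package main

import "fmt"

func main() {
	// A node with 4000m allocatable CPU whose pods request a total of 2000m
	// reports a CPU usage of (2000 * 100) / 4000 = 50 percent, which the
	// strategy then compares against thresholds / targetThresholds for
	// v1.ResourceCPU. Memory and pod count follow the same pattern.
	totalCPUReqMilli := float64(2000)
	nodeCPUMilli := float64(4000)
	fmt.Println(totalCPUReqMilli * 100 / nodeCPUMilli) // 50
}
```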

func classifyPods(pods []*v1.Pod, evictLocalStoragePods bool) ([]*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
var nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods []*v1.Pod

// From https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/
//
// For a Pod to be given a QoS class of Guaranteed:
// - every Container in the Pod must have a memory limit and a memory request, and they must be the same.
// - every Container in the Pod must have a CPU limit and a CPU request, and they must be the same.
// A Pod is given a QoS class of Burstable if:
// - the Pod does not meet the criteria for QoS class Guaranteed.
// - at least one Container in the Pod has a memory or CPU request.
// For a Pod to be given a QoS class of BestEffort, the Containers in the Pod must not have any memory or CPU limits or requests.

for _, pod := range pods {
if !podutil.IsEvictable(pod, evictLocalStoragePods) {
nonRemovablePods = append(nonRemovablePods, pod)
continue
}

switch utils.GetPodQOS(pod) {
case v1.PodQOSGuaranteed:
guaranteedPods = append(guaranteedPods, pod)
case v1.PodQOSBurstable:
burstablePods = append(burstablePods, pod)
default: // alias v1.PodQOSBestEffort
bestEffortPods = append(bestEffortPods, pod)
}
}

return nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods
}
@@ -24,6 +24,7 @@ import (
"reflect"

v1 "k8s.io/api/core/v1"
"k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
@@ -39,239 +40,357 @@ import (
"sigs.k8s.io/descheduler/test"
)

// TODO: Make this table driven.
func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
var thresholds = make(api.ResourceThresholds)
var targetThresholds = make(api.ResourceThresholds)
thresholds[v1.ResourceCPU] = 30
thresholds[v1.ResourcePods] = 30
targetThresholds[v1.ResourceCPU] = 50
targetThresholds[v1.ResourcePods] = 50
var (
lowPriority = int32(0)
highPriority = int32(10000)
)

n1 := test.BuildTestNode("n1", 4000, 3000, 9)
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
// Making n3 node unschedulable so that it won't counted in lowUtilized nodes list.
n3.Spec.Unschedulable = true
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)

// These won't be evicted.
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)

p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
// The following 4 pods won't get evicted.
// A daemonset.
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p7.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p7.Annotations = test.GetMirrorPodAnnotation()
// A Critical Pod.
p8.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
p8.Spec.Priority = &priority
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, "n1") {
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
}
if strings.Contains(fieldString, "n2") {
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
}
if strings.Contains(fieldString, "n3") {
return true, &v1.PodList{Items: []v1.Pod{}}, nil
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
switch getAction.GetName() {
case n1.Name:
return true, n1, nil
case n2.Name:
return true, n2, nil
case n3.Name:
return true, n3, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
expectedPodsEvicted := 3
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
}
npe := utils.NodePodEvictedCount{}
npe[n1] = 0
npe[n2] = 0
npe[n3] = 0
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
if expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
}
func setRSOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList() }
func setDSOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList() }
func setNormalOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() }
func setHighPriority(pod *v1.Pod) { pod.Spec.Priority = &highPriority }
func setLowPriority(pod *v1.Pod) { pod.Spec.Priority = &lowPriority }
func setNodeUnschedulable(node *v1.Node) { node.Spec.Unschedulable = true }

func makeBestEffortPod(pod *v1.Pod) {
pod.Spec.Containers[0].Resources.Requests = nil
pod.Spec.Containers[0].Resources.Requests = nil
pod.Spec.Containers[0].Resources.Limits = nil
pod.Spec.Containers[0].Resources.Limits = nil
}

// TODO: Make this table driven.
func TestLowNodeUtilizationWithPriorities(t *testing.T) {
var thresholds = make(api.ResourceThresholds)
var targetThresholds = make(api.ResourceThresholds)
thresholds[v1.ResourceCPU] = 30
thresholds[v1.ResourcePods] = 30
targetThresholds[v1.ResourceCPU] = 50
targetThresholds[v1.ResourcePods] = 50
lowPriority := int32(0)
highPriority := int32(10000)
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
// Making n3 node unschedulable so that it won't counted in lowUtilized nodes list.
n3.Spec.Unschedulable = true
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
p1.Spec.Priority = &highPriority
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
p2.Spec.Priority = &highPriority
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
p3.Spec.Priority = &highPriority
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
p4.Spec.Priority = &highPriority
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
p5.Spec.Priority = &lowPriority
func makeBurstablePod(pod *v1.Pod) {
pod.Spec.Containers[0].Resources.Limits = nil
pod.Spec.Containers[0].Resources.Limits = nil
}

// These won't be evicted.
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
p6.Spec.Priority = &highPriority
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
p7.Spec.Priority = &lowPriority
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
p8.Spec.Priority = &lowPriority
func makeGuaranteedPod(pod *v1.Pod) {
pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]
}

p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
// The following 4 pods won't get evicted.
// A daemonset.
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p7.Spec.Volumes = []v1.Volume{
func TestLowNodeUtilization(t *testing.T) {
n1NodeName := "n1"
n2NodeName := "n2"
n3NodeName := "n3"

testCases := []struct {
name string
thresholds, targetThresholds api.ResourceThresholds
nodes map[string]*v1.Node
pods map[string]*v1.PodList
expectedPodsEvicted int
evictedPods []string
}{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
name: "without priorities",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, setRSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, setRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n1NodeName, setRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n1NodeName, setRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n1NodeName, setRSOwnerRef),
// These won't be evicted.
*test.BuildTestPod("p6", 400, 0, n1NodeName, setDSOwnerRef),
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
setNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
},
},
n3NodeName: {},
},
expectedPodsEvicted: 3,
},
{
name: "with priorities",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
setRSOwnerRef(pod)
setHighPriority(pod)
}),
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
setRSOwnerRef(pod)
setHighPriority(pod)
}),
*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
setRSOwnerRef(pod)
setHighPriority(pod)
}),
*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
setRSOwnerRef(pod)
setHighPriority(pod)
}),
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
setRSOwnerRef(pod)
setLowPriority(pod)
}),
// These won't be evicted.
*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
setDSOwnerRef(pod)
setHighPriority(pod)
}),
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
setNormalOwnerRef(pod)
setLowPriority(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
},
},
n3NodeName: {},
},
expectedPodsEvicted: 3,
},
{
name: "without priorities evicting best-effort pods only",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
},
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
setRSOwnerRef(pod)
makeBestEffortPod(pod)
}),
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
setRSOwnerRef(pod)
makeBestEffortPod(pod)
}),
*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
setRSOwnerRef(pod)
}),
*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
setRSOwnerRef(pod)
makeBestEffortPod(pod)
}),
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
setRSOwnerRef(pod)
makeBestEffortPod(pod)
}),
// These won't be evicted.
*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
setDSOwnerRef(pod)
}),
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
setNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
},
},
n3NodeName: {},
},
expectedPodsEvicted: 4,
evictedPods: []string{"p1", "p2", "p4", "p5"},
},
}
// A Mirror Pod.
p7.Annotations = test.GetMirrorPodAnnotation()
// A Critical Pod.
p8.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
p8.Spec.Priority = &priority
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, "n1") {
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
}
if strings.Contains(fieldString, "n2") {
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
}
if strings.Contains(fieldString, "n3") {
return true, &v1.PodList{Items: []v1.Pod{}}, nil
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
switch getAction.GetName() {
case n1.Name:
return true, n1, nil
case n2.Name:
return true, n2, nil
case n3.Name:
return true, n3, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
expectedPodsEvicted := 3
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
}
npe := utils.NodePodEvictedCount{}
npe[n1] = 0
npe[n2] = 0
npe[n3] = 0
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
if expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
}

for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, n1NodeName) {
return true, test.pods[n1NodeName], nil
}
if strings.Contains(fieldString, n2NodeName) {
return true, test.pods[n2NodeName], nil
}
if strings.Contains(fieldString, n3NodeName) {
return true, test.pods[n3NodeName], nil
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
if node, exists := test.nodes[getAction.GetName()]; exists {
return true, node, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
podsForEviction := make(map[string]struct{})
for _, pod := range test.evictedPods {
podsForEviction[pod] = struct{}{}
}

evictionFailed := false
if len(test.evictedPods) > 0 {
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.CreateAction)
obj := getAction.GetObject()
if eviction, ok := obj.(*v1beta1.Eviction); ok {
if _, exists := podsForEviction[eviction.Name]; exists {
return true, obj, nil
}
evictionFailed = true
return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
}
return true, obj, nil
})
}

var nodes []*v1.Node
npe := utils.NodePodEvictedCount{}
for _, node := range test.nodes {
nodes = append(nodes, node)
npe[node] = 0
}

npm := createNodePodsMap(fakeClient, nodes)
lowNodes, targetNodes := classifyNodes(npm, test.thresholds, test.targetThresholds, false)
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
}
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, test.targetThresholds, false, test.expectedPodsEvicted, false, npe)
if test.expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
}
if evictionFailed {
t.Errorf("Pod evictions failed unexpectedly")
}
})
}
}

func TestSortPodsByPriority(t *testing.T) {
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
lowPriority := int32(0)
highPriority := int32(10000)
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
p1.Spec.Priority = &lowPriority
n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)

p1 := test.BuildTestPod("p1", 400, 0, n1.Name, setLowPriority)

// BestEffort
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
p2.Spec.Priority = &highPriority

p2.Spec.Containers[0].Resources.Requests = nil
p2.Spec.Containers[0].Resources.Limits = nil
p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
setHighPriority(pod)
makeBestEffortPod(pod)
})

// Burstable
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
p3.Spec.Priority = &highPriority
p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
setHighPriority(pod)
makeBurstablePod(pod)
})

// Guaranteed
p4 := test.BuildTestPod("p4", 400, 100, n1.Name)
p4.Spec.Priority = &highPriority
p4.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
p4.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
setHighPriority(pod)
makeGuaranteedPod(pod)
})

// Best effort with nil priorities.
p5 := test.BuildTestPod("p5", 400, 100, n1.Name)
p5 := test.BuildTestPod("p5", 400, 100, n1.Name, makeBestEffortPod)
p5.Spec.Priority = nil
p6 := test.BuildTestPod("p6", 400, 100, n1.Name)
p6.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
p6.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)

p6 := test.BuildTestPod("p6", 400, 100, n1.Name, makeGuaranteedPod)
p6.Spec.Priority = nil

podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
@@ -393,9 +512,9 @@ func TestWithTaints(t *testing.T) {
},
}

n1 := test.BuildTestNode("n1", 2000, 3000, 10)
n2 := test.BuildTestNode("n2", 1000, 3000, 10)
n3 := test.BuildTestNode("n3", 1000, 3000, 10)
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 1000, 3000, 10, nil)
n3 := test.BuildTestNode("n3", 1000, 3000, 10, nil)
n3withTaints := n3.DeepCopy()
n3withTaints.Spec.Taints = []v1.Taint{
{
@@ -405,7 +524,7 @@ func TestWithTaints(t *testing.T) {
},
}

podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name)
podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, setRSOwnerRef)
podThatToleratesTaint.Spec.Tolerations = []v1.Toleration{
{
Key: "key",
@@ -424,16 +543,16 @@ func TestWithTaints(t *testing.T) {
nodes: []*v1.Node{n1, n2, n3},
pods: []*v1.Pod{
//Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
// Node 2 pods
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name),
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, setRSOwnerRef),
},
evictionsExpected: 1,
},
@@ -442,16 +561,16 @@ func TestWithTaints(t *testing.T) {
nodes: []*v1.Node{n1, n3withTaints},
pods: []*v1.Pod{
//Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
// Node 3 pods
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name),
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
},
evictionsExpected: 0,
},
@@ -460,16 +579,16 @@ func TestWithTaints(t *testing.T) {
nodes: []*v1.Node{n1, n3withTaints},
pods: []*v1.Pod{
//Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name),
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
podThatToleratesTaint,
// Node 3 pods
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name),
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
},
evictionsExpected: 1,
},
@@ -483,7 +602,6 @@ func TestWithTaints(t *testing.T) {
}

for _, pod := range item.pods {
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
objs = append(objs, pod)
}

@@ -42,17 +42,17 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {

nodeLabelKey := "kubernetes.io/desiredNode"
nodeLabelValue := "yes"
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10)
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10, nil)
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue

nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10)
nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10, nil)

unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10)
unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10, nil)
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
unschedulableNodeWithLabels.Spec.Unschedulable = true

addPodsToNode := func(node *v1.Node) []v1.Pod {
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name)
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
@@ -73,8 +73,8 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
},
}

pod1 := test.BuildTestPod("pod1", 100, 0, node.Name)
pod2 := test.BuildTestPod("pod2", 100, 0, node.Name)
pod1 := test.BuildTestPod("pod1", 100, 0, node.Name, nil)
pod2 := test.BuildTestPod("pod2", 100, 0, node.Name, nil)

podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

@@ -42,17 +42,17 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {

func TestDeletePodsViolatingNodeTaints(t *testing.T) {

node1 := test.BuildTestNode("n1", 2000, 3000, 10)
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
node2 := test.BuildTestNode("n2", 2000, 3000, 10)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
node1 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})

p1 := test.BuildTestPod("p1", 100, 0, node1.Name)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name)
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)

p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -60,11 +60,11 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p7 := test.BuildTestPod("p7", 100, 0, node2.Name)
p8 := test.BuildTestPod("p8", 100, 0, node2.Name)
p9 := test.BuildTestPod("p9", 100, 0, node2.Name)
p10 := test.BuildTestPod("p10", 100, 0, node2.Name)
p11 := test.BuildTestPod("p11", 100, 0, node2.Name)
p7 := test.BuildTestPod("p7", 100, 0, node2.Name, nil)
p8 := test.BuildTestPod("p8", 100, 0, node2.Name, nil)
p9 := test.BuildTestPod("p9", 100, 0, node2.Name, nil)
p10 := test.BuildTestPod("p10", 100, 0, node2.Name, nil)
p11 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
p11.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

// The following 4 pods won't get evicted.
@@ -29,11 +29,11 @@ import (
)

func TestPodAntiAffinity(t *testing.T) {
node := test.BuildTestNode("n1", 2000, 3000, 10)
p1 := test.BuildTestPod("p1", 100, 0, node.Name)
p2 := test.BuildTestPod("p2", 100, 0, node.Name)
p3 := test.BuildTestPod("p3", 100, 0, node.Name)
p4 := test.BuildTestPod("p4", 100, 0, node.Name)
node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
p2.Labels = map[string]string{"foo": "bar"}
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

@@ -25,7 +25,7 @@ import (
)

// BuildTestPod creates a test pod with given parameters.
func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod {
func BuildTestPod(name string, cpu int64, memory int64, nodeName string, apply func(*v1.Pod)) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
@@ -50,7 +50,9 @@ func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod
if memory >= 0 {
pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
}

if apply != nil {
apply(pod)
}
return pod
}

@@ -85,7 +87,7 @@ func GetDaemonSetOwnerRefList() []metav1.OwnerReference {
}

// BuildTestNode creates a node with specified capacity.
func BuildTestNode(name string, millicpu int64, mem int64, pods int64) *v1.Node {
func BuildTestNode(name string, millicpu int64, mem int64, pods int64, apply func(*v1.Node)) *v1.Node {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@@ -109,5 +111,8 @@ func BuildTestNode(name string, millicpu int64, mem int64, pods int64) *v1.Node
},
},
}
if apply != nil {
apply(node)
}
return node
}
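For reference, a minimal sketch of how the reworked builders are meant to be called (assuming the repository's sigs.k8s.io/descheduler/test package is importable from the caller): pass nil to keep the old behaviour, or pass a mutator to adjust the fixture at construction time instead of patching it afterwards.

```go
package example

import (
	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/test"
)

// buildFixtures shows the two call styles introduced by this commit:
// a plain node built with a nil mutator, and a pod whose owner references
// are set in-line through the new apply argument.
func buildFixtures() (*v1.Node, *v1.Pod) {
	node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
	pod := test.BuildTestPod("p1", 400, 0, node.Name, func(pod *v1.Pod) {
		pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
	})
	return node, pod
}
```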