/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodeutilization

import (
	"context"
	"fmt"
	"testing"

	v1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	"k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/test"
)

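// TestLowNodeUtilization exercises the LowNodeUtilization strategy against a
// fake cluster. Roughly: a node is considered underutilized only when its
// usage is below every configured threshold, and overutilized when its usage
// exceeds any target threshold; pods are then evicted from overutilized nodes
// until either the targets are met or the spare capacity of the underutilized
// nodes is used up.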
func TestLowNodeUtilization(t *testing.T) {
	n1NodeName := "n1"
	n2NodeName := "n2"
	n3NodeName := "n3"

	nodeSelectorKey := "datacenter"
	nodeSelectorValue := "west"
	notMatchingNodeSelectorValue := "east"

	testCases := []struct {
		name                         string
		useDeviationThresholds       bool
		thresholds, targetThresholds api.ResourceThresholds
		nodes                        []*v1.Node
		pods                         []*v1.Pod
		expectedPodsEvicted          uint
		evictedPods                  []string
	}{
		{
			name: "no evictable pods",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  30,
				v1.ResourcePods: 30,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  50,
				v1.ResourcePods: 50,
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
				test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
			},
			pods: []*v1.Pod{
				// These won't be evicted.
				test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
				test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
			},
			expectedPodsEvicted: 0,
		},
		{
			name: "without priorities",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  30,
				v1.ResourcePods: 30,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  50,
				v1.ResourcePods: 50,
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
				test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
			},
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
				// These won't be evicted.
				test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
				test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
			},
			expectedPodsEvicted: 4,
		},
		{
			name: "without priorities stop when cpu capacity is depleted",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  30,
				v1.ResourcePods: 30,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  50,
				v1.ResourcePods: 50,
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
				test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
			},
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 400, 300, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p2", 400, 300, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p3", 400, 300, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p4", 400, 300, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p5", 400, 300, n1NodeName, test.SetRSOwnerRef),
				// These won't be evicted.
				test.BuildTestPod("p6", 400, 300, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p7", 400, 300, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p8", 400, 300, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
				test.BuildTestPod("p9", 400, 2100, n2NodeName, test.SetRSOwnerRef),
			},
			// 4 pods are available for eviction based on v1.ResourcePods, but only
			// 3 pods can be evicted before cpu is depleted.
			expectedPodsEvicted: 3,
		},
		{
			name: "with priorities",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  30,
				v1.ResourcePods: 30,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  50,
				v1.ResourcePods: 50,
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
				test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
			},
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.SetPodPriority(pod, highPriority)
				}),
				test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.SetPodPriority(pod, highPriority)
				}),
				test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.SetPodPriority(pod, highPriority)
				}),
				test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.SetPodPriority(pod, highPriority)
				}),
				test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.SetPodPriority(pod, lowPriority)
				}),
				// These won't be evicted.
				test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetDSOwnerRef(pod)
					test.SetPodPriority(pod, highPriority)
				}),
				test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					test.SetPodPriority(pod, lowPriority)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
				test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
			},
			expectedPodsEvicted: 4,
		},
		{
			name: "without priorities evicting best-effort pods only",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  30,
				v1.ResourcePods: 30,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  50,
				v1.ResourcePods: 50,
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
				test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
			},
			// All pods are assumed to be burstable (test.BuildTestPod always sets
			// both cpu/memory resource requests to some value).
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.MakeBestEffortPod(pod)
				}),
				test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.MakeBestEffortPod(pod)
				}),
				test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
				}),
				test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.MakeBestEffortPod(pod)
				}),
				test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.MakeBestEffortPod(pod)
				}),
				// These won't be evicted.
				test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetDSOwnerRef(pod)
				}),
				test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
				test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
			},
			expectedPodsEvicted: 4,
			evictedPods:         []string{"p1", "p2", "p4", "p5"},
		},
		{
			name: "with extended resource",
			thresholds: api.ResourceThresholds{
				v1.ResourcePods:  30,
				extendedResource: 30,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourcePods:  50,
				extendedResource: 50,
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
					test.SetNodeExtendedResource(node, extendedResource, 8)
				}),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
					test.SetNodeExtendedResource(node, extendedResource, 8)
				}),
				test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
			},
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with extended resource.
					test.SetRSOwnerRef(pod)
					test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
				}),
				test.BuildTestPod("p2", 0, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
				}),
				test.BuildTestPod("p3", 0, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
				}),
				test.BuildTestPod("p4", 0, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
				}),
				test.BuildTestPod("p5", 0, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
				}),
				test.BuildTestPod("p6", 0, 0, n1NodeName, func(pod *v1.Pod) {
					test.SetNormalOwnerRef(pod)
					test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
				}),

				test.BuildTestPod("p7", 0, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p8", 0, 0, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
				test.BuildTestPod("p9", 0, 0, n2NodeName, func(pod *v1.Pod) {
					test.SetRSOwnerRef(pod)
					test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
				}),
			},
			// 4 pods are available for eviction based on v1.ResourcePods, but only
			// 3 pods can be evicted before the extended resource is depleted.
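			// (n2 already uses 1 of its 8 extended-resource units, so its 50%
			// target leaves room for only 3 more 1-unit pods.)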
			expectedPodsEvicted: 3,
		},
		{
			name: "with extended resource in some of nodes",
			thresholds: api.ResourceThresholds{
				v1.ResourcePods:  30,
				extendedResource: 30,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourcePods:  50,
				extendedResource: 50,
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
					test.SetNodeExtendedResource(node, extendedResource, 8)
				}),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
				test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
			},
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with extended resource.
					test.SetRSOwnerRef(pod)
					test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
				}),
				test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
			},
			// 0 pods are available for eviction because there is not enough of the
			// extended resource on n2.
			expectedPodsEvicted: 0,
		},
		{
			name: "without priorities, but only other node is unschedulable",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  30,
				v1.ResourcePods: 30,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  50,
				v1.ResourcePods: 50,
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
			},
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
				// These won't be evicted.
				test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
			},
			expectedPodsEvicted: 0,
		},
		{
			name: "without priorities, but only other node doesn't match pod node selector for p4 and p5",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  30,
				v1.ResourcePods: 30,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  50,
				v1.ResourcePods: 50,
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
					node.ObjectMeta.Labels = map[string]string{
						nodeSelectorKey: notMatchingNodeSelectorValue,
					}
				}),
			},
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
				// These won't be evicted.
				test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod selecting nodes in the "west" datacenter.
					test.SetNormalOwnerRef(pod)
					pod.Spec.NodeSelector = map[string]string{
						nodeSelectorKey: nodeSelectorValue,
					}
				}),
				test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod selecting nodes in the "west" datacenter.
					test.SetNormalOwnerRef(pod)
					pod.Spec.NodeSelector = map[string]string{
						nodeSelectorKey: nodeSelectorValue,
					}
				}),
				test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
			},
			expectedPodsEvicted: 3,
		},
		{
			name: "without priorities, but only other node doesn't match pod node affinity for p4 and p5",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  30,
				v1.ResourcePods: 30,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  50,
				v1.ResourcePods: 50,
			},
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
					node.ObjectMeta.Labels = map[string]string{
						nodeSelectorKey: notMatchingNodeSelectorValue,
					}
				}),
			},
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
				// These won't be evicted.
				test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with affinity to run in the "west" datacenter upon scheduling.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Affinity = &v1.Affinity{
						NodeAffinity: &v1.NodeAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
								NodeSelectorTerms: []v1.NodeSelectorTerm{
									{
										MatchExpressions: []v1.NodeSelectorRequirement{
											{
												Key:      nodeSelectorKey,
												Operator: "In",
												Values:   []string{nodeSelectorValue},
											},
										},
									},
								},
							},
						},
					}
				}),
				test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with affinity to run in the "west" datacenter upon scheduling.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Affinity = &v1.Affinity{
						NodeAffinity: &v1.NodeAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
								NodeSelectorTerms: []v1.NodeSelectorTerm{
									{
										MatchExpressions: []v1.NodeSelectorRequirement{
											{
												Key:      nodeSelectorKey,
												Operator: "In",
												Values:   []string{nodeSelectorValue},
											},
										},
									},
								},
							},
						},
					}
				}),
				test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
				test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
			},
			expectedPodsEvicted: 3,
		},
		{
			name: "deviation thresholds",
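			// With useDeviationThresholds set, the percentages below are offsets
			// around the mean cluster utilization (mean±5%), not absolute values.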
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  5,
				v1.ResourcePods: 5,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  5,
				v1.ResourcePods: 5,
			},
			useDeviationThresholds: true,
			nodes: []*v1.Node{
				test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
				test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
				test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
			},
			pods: []*v1.Pod{
				test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
				test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
				// These won't be evicted.
				test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
				test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A pod with local storage.
					test.SetNormalOwnerRef(pod)
					pod.Spec.Volumes = []v1.Volume{
						{
							Name: "sample",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
								EmptyDir: &v1.EmptyDirVolumeSource{
									SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
							},
						},
					}
					// A Mirror Pod.
					pod.Annotations = test.GetMirrorPodAnnotation()
				}),
				test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
					// A Critical Pod.
					pod.Namespace = "kube-system"
					priority := utils.SystemCriticalPriority
					pod.Spec.Priority = &priority
				}),
				test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
			},
			expectedPodsEvicted: 2,
			evictedPods:         []string{},
		},
	}

	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			var objs []runtime.Object
			for _, node := range test.nodes {
				objs = append(objs, node)
			}
			for _, pod := range test.pods {
				objs = append(objs, pod)
			}
			fakeClient := fake.NewSimpleClientset(objs...)

			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
			podInformer := sharedInformerFactory.Core().V1().Pods()

			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
			if err != nil {
				t.Errorf("Build get pods assigned to node function error: %v", err)
			}

			//fakeClient := &fake.Clientset{}
			//fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
			//	list := action.(core.ListAction)
			//	fieldString := list.GetListRestrictions().Fields.String()
			//	if strings.Contains(fieldString, n1NodeName) {
			//		return true, test.pods[n1NodeName], nil
			//	}
			//	if strings.Contains(fieldString, n2NodeName) {
			//		return true, test.pods[n2NodeName], nil
			//	}
			//	if strings.Contains(fieldString, n3NodeName) {
			//		return true, test.pods[n3NodeName], nil
			//	}
			//	return true, nil, fmt.Errorf("Failed to list: %v", list)
			//})
			//fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			//	getAction := action.(core.GetAction)
			//	if node, exists := test.nodes[getAction.GetName()]; exists {
			//		return true, node, nil
			//	}
			//	return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
			//})
			podsForEviction := make(map[string]struct{})
			for _, pod := range test.evictedPods {
				podsForEviction[pod] = struct{}{}
			}

			evictionFailed := false
			if len(test.evictedPods) > 0 {
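				// Evictions arrive through the fake client as "create" actions on
				// the pods/eviction subresource; flag any pod outside the expected set.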
				fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
					getAction := action.(core.CreateAction)
					obj := getAction.GetObject()
					if eviction, ok := obj.(*v1beta1.Eviction); ok {
						if _, exists := podsForEviction[eviction.Name]; exists {
							return true, obj, nil
						}
						evictionFailed = true
						return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
					}
					return true, obj, nil
				})
			}

			sharedInformerFactory.Start(ctx.Done())
			sharedInformerFactory.WaitForCacheSync(ctx.Done())

			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				policyv1.SchemeGroupVersion.String(),
				false,
				nil,
				nil,
				test.nodes,
				false,
			)

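			// NodeFit: true makes the evictor check that a candidate pod would
			// actually fit on some other ready node before evicting it.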
			strategy := api.DeschedulerStrategy{
				Enabled: true,
				Params: &api.StrategyParameters{
					NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
						Thresholds:             test.thresholds,
						TargetThresholds:       test.targetThresholds,
						UseDeviationThresholds: test.useDeviationThresholds,
					},
					NodeFit: true,
				},
			}

			evictorFilter := evictions.NewEvictorFilter(
				test.nodes,
				getPodsAssignedToNode,
				false,
				false,
				false,
				false,
				evictions.WithNodeFit(strategy.Params.NodeFit),
			)

			LowNodeUtilization(ctx, fakeClient, strategy, test.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)

			podsEvicted := podEvictor.TotalEvicted()
			if test.expectedPodsEvicted != podsEvicted {
				t.Errorf("Expected %v pods to be evicted but %v got evicted", test.expectedPodsEvicted, podsEvicted)
			}
			if evictionFailed {
				t.Errorf("Pod evictions failed unexpectedly")
			}
		})
	}
}

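// TestValidateLowNodeUtilizationStrategyConfig exercises
// validateLowUtilizationStrategyConfig with out-of-range values, mismatched
// resource sets, inverted thresholds, and valid configurations.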
func TestValidateLowNodeUtilizationStrategyConfig(t *testing.T) {
	tests := []struct {
		name             string
		thresholds       api.ResourceThresholds
		targetThresholds api.ResourceThresholds
		errInfo          error
	}{
		{
			name: "passing invalid thresholds",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:    20,
				v1.ResourceMemory: 120,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:    80,
				v1.ResourceMemory: 80,
			},
			errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
				"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
		},
		{
			name: "thresholds and targetThresholds configured different num of resources",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:    20,
				v1.ResourceMemory: 20,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:    80,
				v1.ResourceMemory: 80,
				v1.ResourcePods:   80,
			},
			errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
		},
		{
			name: "thresholds and targetThresholds configured different resources",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:    20,
				v1.ResourceMemory: 20,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  80,
				v1.ResourcePods: 80,
			},
			errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
		},
		{
			name: "thresholds' CPU config value is greater than targetThresholds'",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:    90,
				v1.ResourceMemory: 20,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:    80,
				v1.ResourceMemory: 80,
			},
			errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
		},
		{
			name: "only thresholds configured extended resource",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:    20,
				v1.ResourceMemory: 20,
				extendedResource:  20,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:    80,
				v1.ResourceMemory: 80,
			},
			errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
		},
		{
			name: "only targetThresholds configured extended resource",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:    20,
				v1.ResourceMemory: 20,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:    80,
				v1.ResourceMemory: 80,
				extendedResource:  80,
			},
			errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
		},
		{
			name: "thresholds and targetThresholds configured different extended resources",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:    20,
				v1.ResourceMemory: 20,
				extendedResource:  20,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:    80,
				v1.ResourceMemory: 80,
				"example.com/bar": 80,
			},
			errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
		},
		{
			name: "thresholds' extended resource config value is greater than targetThresholds'",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:    20,
				v1.ResourceMemory: 20,
				extendedResource:  90,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:    80,
				v1.ResourceMemory: 80,
				extendedResource:  20,
			},
			errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource),
		},
		{
			name: "passing valid strategy config",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:    20,
				v1.ResourceMemory: 20,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:    80,
				v1.ResourceMemory: 80,
			},
			errInfo: nil,
		},
		{
			name: "passing valid strategy config with extended resource",
			thresholds: api.ResourceThresholds{
				v1.ResourceCPU:    20,
				v1.ResourceMemory: 20,
				extendedResource:  20,
			},
			targetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:    80,
				v1.ResourceMemory: 80,
				extendedResource:  80,
			},
			errInfo: nil,
		},
	}

	for _, testCase := range tests {
		validateErr := validateLowUtilizationStrategyConfig(testCase.thresholds, testCase.targetThresholds, false)

		if validateErr == nil || testCase.errInfo == nil {
			if validateErr != testCase.errInfo {
				t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
					testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
			}
		} else if validateErr.Error() != testCase.errInfo.Error() {
			t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
				testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
		}
	}
}

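// TestLowNodeUtilizationWithTaints verifies that a pod is evicted only when it
// tolerates the taints of at least one underutilized node it could move to.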
func TestLowNodeUtilizationWithTaints(t *testing.T) {
	ctx := context.Background()
	strategy := api.DeschedulerStrategy{
		Enabled: true,
		Params: &api.StrategyParameters{
			NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
				Thresholds: api.ResourceThresholds{
					v1.ResourcePods: 20,
				},
				TargetThresholds: api.ResourceThresholds{
					v1.ResourcePods: 70,
				},
			},
			NodeFit: true,
		},
	}

	n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
	n2 := test.BuildTestNode("n2", 1000, 3000, 10, nil)
	n3 := test.BuildTestNode("n3", 1000, 3000, 10, nil)
	n3withTaints := n3.DeepCopy()
	n3withTaints.Spec.Taints = []v1.Taint{
		{
			Key:    "key",
			Value:  "value",
			Effect: v1.TaintEffectNoSchedule,
		},
	}

	podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, test.SetRSOwnerRef)
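	// An empty Operator defaults to "Equal", so this tolerates the
	// key=value:NoSchedule taint on n3withTaints.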
	podThatToleratesTaint.Spec.Tolerations = []v1.Toleration{
		{
			Key:   "key",
			Value: "value",
		},
	}

	tests := []struct {
		name              string
		nodes             []*v1.Node
		pods              []*v1.Pod
		evictionsExpected uint
	}{
		{
			name:  "No taints",
			nodes: []*v1.Node{n1, n2, n3},
			pods: []*v1.Pod{
				// Node 1 pods
				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				// Node 2 pods
				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, test.SetRSOwnerRef),
			},
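			// n1 runs 8 of its 10 pod capacity (80%); the 70% pods target
			// should allow roughly one eviction.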
			evictionsExpected: 1,
		},
		{
			name:  "No pod tolerates node taint",
			nodes: []*v1.Node{n1, n3withTaints},
			pods: []*v1.Pod{
				// Node 1 pods
				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				// Node 3 pods
				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
			},
			evictionsExpected: 0,
		},
		{
			name:  "Pod which tolerates node taint",
			nodes: []*v1.Node{n1, n3withTaints},
			pods: []*v1.Pod{
				// Node 1 pods
				test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
				podThatToleratesTaint,
				// Node 3 pods
				test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
			},
			evictionsExpected: 1,
		},
	}

	for _, item := range tests {
		t.Run(item.name, func(t *testing.T) {
			var objs []runtime.Object
			for _, node := range item.nodes {
				objs = append(objs, node)
			}

			for _, pod := range item.pods {
				objs = append(objs, pod)
			}

			fakeClient := fake.NewSimpleClientset(objs...)
			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
			podInformer := sharedInformerFactory.Core().V1().Pods()

			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
			if err != nil {
				t.Errorf("Build get pods assigned to node function error: %v", err)
			}

			sharedInformerFactory.Start(ctx.Done())
			sharedInformerFactory.WaitForCacheSync(ctx.Done())

			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				policyv1.SchemeGroupVersion.String(),
				false,
				&item.evictionsExpected,
				nil,
				item.nodes,
				false,
			)

			evictorFilter := evictions.NewEvictorFilter(
				item.nodes,
				getPodsAssignedToNode,
				false,
				false,
				false,
				false,
			)

			LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)

			if item.evictionsExpected != podEvictor.TotalEvicted() {
				t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
			}
		})
	}
}