mirror of https://github.com/kubernetes-sigs/descheduler.git

Merge pull request #169 from Bowenislandsong/fix-test

Remove hardcoded node selection in E2e test
Kubernetes Prow Robot authored 2019-07-12 13:55:05 -07:00 · committed by GitHub
4 changed files with 52 additions and 18 deletions

.travis.yml

@@ -3,9 +3,23 @@ sudo: false
 language: go
 go:
-- "1.10"
+- 1.11.1
+services:
+- docker
+before_script:
+- curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
+- wget https://github.com/kubernetes-sigs/kind/releases/download/0.2.1/kind-linux-amd64
+- chmod +x kind-linux-amd64
+- mv kind-linux-amd64 kind
+- export PATH=$PATH:$PWD
+- kind create cluster --config=$TRAVIS_BUILD_DIR/hack/kind_config.yaml
+- export KUBECONFIG="$(kind get kubeconfig-path)"
+- docker pull kubernetes/pause
+- kind load docker-image kubernetes/pause
+- cp "$(kind get kubeconfig-path --name="kind")" /tmp/admin.conf
 script:
+- hack/verify-gofmt.sh
 - make lint
 - make build
 - make test-unit
+- make test-e2e

hack/kind_config.yaml Normal file

@@ -0,0 +1,6 @@
+kind: Cluster
+apiVersion: kind.sigs.k8s.io/v1alpha3
+nodes:
+- role: control-plane
+- role: worker
+- role: worker

hack/verify-gofmt.sh

@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
 GO_VERSION=($(go version))
-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi

test/e2e/e2e_test.go

@@ -18,6 +18,7 @@ package e2e
 import (
 	"github.com/golang/glog"
+	"math"
 	"testing"
 	"time"
@@ -38,17 +39,18 @@ import (
 func MakePodSpec() v1.PodSpec {
 	return v1.PodSpec{
 		Containers: []v1.Container{{
-			Name:  "pause",
-			Image: "kubernetes/pause",
-			Ports: []v1.ContainerPort{{ContainerPort: 80}},
+			Name:            "pause",
+			ImagePullPolicy: "Never",
+			Image:           "kubernetes/pause",
+			Ports:           []v1.ContainerPort{{ContainerPort: 80}},
 			Resources: v1.ResourceRequirements{
 				Limits: v1.ResourceList{
 					v1.ResourceCPU:    resource.MustParse("100m"),
-					v1.ResourceMemory: resource.MustParse("500Mi"),
+					v1.ResourceMemory: resource.MustParse("1000Mi"),
 				},
 				Requests: v1.ResourceList{
 					v1.ResourceCPU:    resource.MustParse("100m"),
-					v1.ResourceMemory: resource.MustParse("500Mi"),
+					v1.ResourceMemory: resource.MustParse("800Mi"),
 				},
 			},
 		}},
@@ -119,7 +121,8 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface) {
 func TestE2E(t *testing.T) {
 	// If we have reached here, it means cluster would have been already setup and the kubeconfig file should
-	// be in /tmp directory.
+	// be in /tmp directory as admin.conf.
+	var leastLoadedNode v1.Node
 	clientSet, err := client.CreateClient("/tmp/admin.conf")
 	if err != nil {
 		t.Errorf("Error during client creation with %v", err)
@@ -130,20 +133,31 @@ func TestE2E(t *testing.T) {
 	}
 	// Assumption: We would have 3 node cluster by now. Kubeadm brings all the master components onto master node.
 	// So, the last node would have least utilization.
-	leastLoadedNode := nodeList.Items[2]
 	rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
 	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc)
 	if err != nil {
 		t.Errorf("Error creating deployment %v", err)
 	}
-	podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(clientSet, &leastLoadedNode)
-	if err != nil {
-		t.Errorf("Error listing pods on a node %v", err)
+	podsBefore := math.MaxInt16
+	for i := range nodeList.Items {
+		// Skip the Master Node
+		if _, exist := nodeList.Items[i].Labels["node-role.kubernetes.io/master"]; exist {
+			continue
+		}
+		// List all the pods on the current Node
+		podsOnANode, err := podutil.ListEvictablePodsOnNode(clientSet, &nodeList.Items[i], true)
+		if err != nil {
+			t.Errorf("Error listing pods on a node %v", err)
+		}
+		// Update leastLoadedNode if necessary
+		if tmpLoads := len(podsOnANode); tmpLoads < podsBefore {
+			leastLoadedNode = nodeList.Items[i]
+			podsBefore = tmpLoads
+		}
 	}
-	podsBefore := len(podsOnleastUtilizedNode)
 	t.Log("Eviction of pods starting")
 	startEndToEndForLowNodeUtilization(clientSet)
-	podsOnleastUtilizedNode, err = podutil.ListPodsOnANode(clientSet, &leastLoadedNode)
+	podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(clientSet, &leastLoadedNode, true)
 	if err != nil {
 		t.Errorf("Error listing pods on a node %v", err)
 	}
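
Taken together, the e2e change replaces the hardcoded nodeList.Items[2] with a scan for the least-loaded worker node. Below is a minimal, self-contained sketch of that selection logic; the node struct and the leastLoaded helper are illustrative stand-ins (the real test works on v1.Node and counts pods via podutil.ListEvictablePodsOnNode as shown in the diff above).

package main

import (
	"fmt"
	"math"
)

// node is a pared-down stand-in for v1.Node, just enough for this sketch.
type node struct {
	Name   string
	Labels map[string]string
	Pods   int // number of evictable pods currently scheduled on the node
}

// leastLoaded returns the non-master node carrying the fewest pods,
// mirroring the selection loop added to TestE2E.
func leastLoaded(nodes []node) (node, bool) {
	var best node
	found := false
	fewest := math.MaxInt16
	for _, n := range nodes {
		// Skip the control-plane node, as the test does via the master label.
		if _, isMaster := n.Labels["node-role.kubernetes.io/master"]; isMaster {
			continue
		}
		if n.Pods < fewest {
			best, fewest, found = n, n.Pods, true
		}
	}
	return best, found
}

func main() {
	nodes := []node{
		{Name: "kind-control-plane", Labels: map[string]string{"node-role.kubernetes.io/master": ""}, Pods: 9},
		{Name: "kind-worker", Pods: 7},
		{Name: "kind-worker2", Pods: 4},
	}
	if n, ok := leastLoaded(nodes); ok {
		fmt.Println("least loaded node:", n.Name) // prints: least loaded node: kind-worker2
	}
}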