diff --git a/.travis.yml b/.travis.yml
index 67d508953..4a7fc6f58 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,9 +3,23 @@ sudo: false
 language: go
 
 go:
-- "1.10"
-
+  - 1.11.1
+services:
+  - docker
+before_script:
+  - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
+  - wget https://github.com/kubernetes-sigs/kind/releases/download/0.2.1/kind-linux-amd64
+  - chmod +x kind-linux-amd64
+  - mv kind-linux-amd64 kind
+  - export PATH=$PATH:$PWD
+  - kind create cluster --config=$TRAVIS_BUILD_DIR/hack/kind_config.yaml
+  - export KUBECONFIG="$(kind get kubeconfig-path)"
+  - docker pull kubernetes/pause
+  - kind load docker-image kubernetes/pause
+  - cp "$(kind get kubeconfig-path --name="kind")" /tmp/admin.conf
 script:
-- make lint
-- make build
-- make test-unit
+- hack/verify-gofmt.sh
+- make lint
+- make build
+- make test-unit
+- make test-e2e
diff --git a/hack/kind_config.yaml b/hack/kind_config.yaml
new file mode 100644
index 000000000..a59746fd3
--- /dev/null
+++ b/hack/kind_config.yaml
@@ -0,0 +1,6 @@
+kind: Cluster
+apiVersion: kind.sigs.k8s.io/v1alpha3
+nodes:
+- role: control-plane
+- role: worker
+- role: worker
diff --git a/hack/verify-gofmt.sh b/hack/verify-gofmt.sh
index 365d130d0..08ab75f21 100755
--- a/hack/verify-gofmt.sh
+++ b/hack/verify-gofmt.sh
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
 
 GO_VERSION=($(go version))
 
-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index a09f4e14c..d62f85255 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -18,6 +18,7 @@ package e2e
 
 import (
 	"github.com/golang/glog"
+	"math"
 	"testing"
 	"time"
 
@@ -38,17 +39,18 @@ import (
 func MakePodSpec() v1.PodSpec {
 	return v1.PodSpec{
 		Containers: []v1.Container{{
-			Name:  "pause",
-			Image: "kubernetes/pause",
-			Ports: []v1.ContainerPort{{ContainerPort: 80}},
+			Name:            "pause",
+			ImagePullPolicy: "Never",
+			Image:           "kubernetes/pause",
+			Ports:           []v1.ContainerPort{{ContainerPort: 80}},
 			Resources: v1.ResourceRequirements{
 				Limits: v1.ResourceList{
 					v1.ResourceCPU:    resource.MustParse("100m"),
-					v1.ResourceMemory: resource.MustParse("500Mi"),
+					v1.ResourceMemory: resource.MustParse("1000Mi"),
 				},
 				Requests: v1.ResourceList{
 					v1.ResourceCPU:    resource.MustParse("100m"),
-					v1.ResourceMemory: resource.MustParse("500Mi"),
+					v1.ResourceMemory: resource.MustParse("800Mi"),
 				},
 			},
 		}},
@@ -119,7 +121,8 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface) {
 
 func TestE2E(t *testing.T) {
 	// If we have reached here, it means cluster would have been already setup and the kubeconfig file should
-	// be in /tmp directory.
+	// be in /tmp directory as admin.conf.
+	var leastLoadedNode v1.Node
 	clientSet, err := client.CreateClient("/tmp/admin.conf")
 	if err != nil {
 		t.Errorf("Error during client creation with %v", err)
@@ -130,20 +133,31 @@ func TestE2E(t *testing.T) {
 	}
 	// Assumption: We would have 3 node cluster by now. Kubeadm brings all the master components onto master node.
 	// So, the last node would have least utilization.
-	leastLoadedNode := nodeList.Items[2]
 	rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
 	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc)
 	if err != nil {
 		t.Errorf("Error creating deployment %v", err)
 	}
-	podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(clientSet, &leastLoadedNode)
-	if err != nil {
-		t.Errorf("Error listing pods on a node %v", err)
+	podsBefore := math.MaxInt16
+	for i := range nodeList.Items {
+		// Skip the Master Node
+		if _, exist := nodeList.Items[i].Labels["node-role.kubernetes.io/master"]; exist {
+			continue
+		}
+		// List all the pods on the current Node
+		podsOnANode, err := podutil.ListEvictablePodsOnNode(clientSet, &nodeList.Items[i], true)
+		if err != nil {
+			t.Errorf("Error listing pods on a node %v", err)
+		}
+		// Update leastLoadedNode if necessary
+		if tmpLoads := len(podsOnANode); tmpLoads < podsBefore {
+			leastLoadedNode = nodeList.Items[i]
+			podsBefore = tmpLoads
+		}
 	}
-	podsBefore := len(podsOnleastUtilizedNode)
 	t.Log("Eviction of pods starting")
 	startEndToEndForLowNodeUtilization(clientSet)
-	podsOnleastUtilizedNode, err = podutil.ListPodsOnANode(clientSet, &leastLoadedNode)
+	podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(clientSet, &leastLoadedNode, true)
 	if err != nil {
 		t.Errorf("Error listing pods on a node %v", err)
 	}