mirror of
https://github.com/kubernetes-sigs/descheduler.git
synced 2026-01-26 05:14:13 +01:00
Compare commits
215 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7a6e095451 | ||
|
|
28c17d240d | ||
|
|
6dd91b6a22 | ||
|
|
7d93551c34 | ||
|
|
853c43737d | ||
|
|
d8dab9d134 | ||
|
|
8dc7b475d9 | ||
|
|
15a971494e | ||
|
|
5364e17c62 | ||
|
|
fc8c581d7a | ||
|
|
d7482bd618 | ||
|
|
d065f9904b | ||
|
|
fb8cdc10c7 | ||
|
|
55cf45a6ba | ||
|
|
e2a23f2848 | ||
|
|
c567768845 | ||
|
|
9510891f42 | ||
|
|
73858beeea | ||
|
|
c3346e9806 | ||
|
|
cc92eaa35d | ||
|
|
816f8cb682 | ||
|
|
5f12ade97b | ||
|
|
2b68e65238 | ||
|
|
86d0f3b038 | ||
|
|
c9f64dfe37 | ||
|
|
431597dd43 | ||
|
|
861f057d1b | ||
|
|
d845040d77 | ||
|
|
30d05382b6 | ||
|
|
ca02665d14 | ||
|
|
2328d89897 | ||
|
|
ac089fe5ce | ||
|
|
00e23dbc07 | ||
|
|
3401edab53 | ||
|
|
e3865fcf8e | ||
|
|
361aa01c51 | ||
|
|
c77f240e37 | ||
|
|
d9a77393cc | ||
|
|
44c7eb5285 | ||
|
|
462bbbbb47 | ||
|
|
18e3fd3de5 | ||
|
|
fdf94304d0 | ||
|
|
ea0ba7d39a | ||
|
|
5be355d815 | ||
|
|
5efec68fd3 | ||
|
|
b3cc62dac6 | ||
|
|
3be0a9f80d | ||
|
|
906bca0802 | ||
|
|
d193bc1370 | ||
|
|
6654aeff99 | ||
|
|
e3d06d1541 | ||
|
|
b44d7718b3 | ||
|
|
531d6ddc49 | ||
|
|
bbc902b86f | ||
|
|
1fdcbcd008 | ||
|
|
2fdcfc04d5 | ||
|
|
872953b9cf | ||
|
|
9276f0e555 | ||
|
|
cee12a5019 | ||
|
|
456110b508 | ||
|
|
edab9d7fed | ||
|
|
7563b5561b | ||
|
|
8b210b08f6 | ||
|
|
a3d33909fa | ||
|
|
4b9e732c18 | ||
|
|
dd54f1a656 | ||
|
|
11044ed89d | ||
|
|
e5d4a2eba6 | ||
|
|
c9e3c63b85 | ||
|
|
5b1d551ffd | ||
|
|
b176dd2e77 | ||
|
|
9ea6aa536e | ||
|
|
bd2c217010 | ||
|
|
c42670e1cc | ||
|
|
5e25e21ca2 | ||
|
|
0af97c1b5e | ||
|
|
1652ba7976 | ||
|
|
5af668e89a | ||
|
|
26adf87323 | ||
|
|
0a58cf4535 | ||
|
|
3116dad75e | ||
|
|
fb1b5fc690 | ||
|
|
dc7f9efc19 | ||
|
|
f1127541aa | ||
|
|
9d6b6094cd | ||
|
|
e35eb4a0b5 | ||
|
|
2e6f14103b | ||
|
|
17ef1d5e5f | ||
|
|
7788d53d0b | ||
|
|
5f66ed8401 | ||
|
|
cd4d09726c | ||
|
|
68a106aed0 | ||
|
|
1931bd6c1a | ||
|
|
9e28f0b362 | ||
|
|
7245a31f52 | ||
|
|
66a2a87e49 | ||
|
|
e7ceddf2bc | ||
|
|
e3d25a9ab4 | ||
|
|
992e00ecd2 | ||
|
|
d157a4359b | ||
|
|
6a08b5661a | ||
|
|
7094c404c9 | ||
|
|
20c610c65a | ||
|
|
934a06381d | ||
|
|
674c1db05c | ||
|
|
dee89a6cc1 | ||
|
|
60fbaca305 | ||
|
|
164d2b0729 | ||
|
|
023a2f2a47 | ||
|
|
d1c6f3f709 | ||
|
|
fc1688057a | ||
|
|
e6e200b93c | ||
|
|
5d843d1f08 | ||
|
|
6c981cc067 | ||
|
|
22a3a6ea1d | ||
|
|
294bddb5e2 | ||
|
|
0a9d1959e2 | ||
|
|
19ee5d80b5 | ||
|
|
14d9e175c2 | ||
|
|
468e138070 | ||
|
|
db13b2ac73 | ||
|
|
40ca53e0a5 | ||
|
|
35d8367fe5 | ||
|
|
345dd9cf27 | ||
|
|
81f471fe05 | ||
|
|
aa5e8770f5 | ||
|
|
2690d139c5 | ||
|
|
cd192ce5fc | ||
|
|
048f3fd1e5 | ||
|
|
a079fd2757 | ||
|
|
ae0a9ed525 | ||
|
|
0a815e8786 | ||
|
|
0115748fe8 | ||
|
|
d0305dac3f | ||
|
|
72d6a8aa33 | ||
|
|
654fdbba94 | ||
|
|
d73471327b | ||
|
|
1294c8a2c2 | ||
|
|
ddd3dd6f19 | ||
|
|
fad9e8dc39 | ||
|
|
450a5c290b | ||
|
|
af2198428e | ||
|
|
bc7be54e2e | ||
|
|
ccb61fc800 | ||
|
|
61819e3fec | ||
|
|
a7ceb67109 | ||
|
|
57a28e9a8f | ||
|
|
c1d87dd93c | ||
|
|
34fb602101 | ||
|
|
a6af54ab30 | ||
|
|
e41ef8cca3 | ||
|
|
d26cd4b317 | ||
|
|
f7d0acb731 | ||
|
|
f1f8b2eaa7 | ||
|
|
0a7f14d75e | ||
|
|
de76f9b14c | ||
|
|
8b84bb26ff | ||
|
|
bb25192163 | ||
|
|
40bb490f4c | ||
|
|
08729f6ef9 | ||
|
|
3dd7de8132 | ||
|
|
471aeb5ea4 | ||
|
|
65e7093ee7 | ||
|
|
fc0cd4ba30 | ||
|
|
ba3eac6c57 | ||
|
|
11a95ce8fb | ||
|
|
29a9fc6b56 | ||
|
|
d3c2f25685 | ||
|
|
e858c9ee80 | ||
|
|
44752e5e83 | ||
|
|
7123f30783 | ||
|
|
a82cf7cea4 | ||
|
|
72318868b0 | ||
|
|
589fb95236 | ||
|
|
7df543d137 | ||
|
|
1d7f429ba1 | ||
|
|
bf29a6073f | ||
|
|
65635bdb2e | ||
|
|
fd961557d0 | ||
|
|
c29bdc1dbe | ||
|
|
ffecc54bf5 | ||
|
|
f6f6fbab10 | ||
|
|
12c217477c | ||
|
|
20a4798465 | ||
|
|
a201f222e5 | ||
|
|
6e705fde85 | ||
|
|
5763554be4 | ||
|
|
6f873d5e69 | ||
|
|
445ae92caa | ||
|
|
8f3c0cf4b8 | ||
|
|
2a280f9a20 | ||
|
|
0503f53904 | ||
|
|
e0a9dfcb76 | ||
|
|
5db49f2ce1 | ||
|
|
582bd67681 | ||
|
|
a63f815116 | ||
|
|
afc17a62ea | ||
|
|
6dbc8a1fcc | ||
|
|
d2bd16a12d | ||
|
|
955d0eb228 | ||
|
|
a490726245 | ||
|
|
16a504fb87 | ||
|
|
d54b73a6ba | ||
|
|
344dc0f3c2 | ||
|
|
9f38146bbf | ||
|
|
1473e1d024 | ||
|
|
7b4b9d9e7e | ||
|
|
7d079813e5 | ||
|
|
e02857e00a | ||
|
|
acfd4f8680 | ||
|
|
c29c9db41e | ||
|
|
5d3f987dde | ||
|
|
97732cf62d | ||
|
|
4afc4dfb16 | ||
|
|
023ccd99f5 |
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
_output/
|
||||||
15
.golangci.yml
Normal file
15
.golangci.yml
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
run:
|
||||||
|
deadline: 2m
|
||||||
|
|
||||||
|
linters:
|
||||||
|
disable-all: true
|
||||||
|
enable:
|
||||||
|
- gofmt
|
||||||
|
- gosimple
|
||||||
|
- gocyclo
|
||||||
|
- misspell
|
||||||
|
- govet
|
||||||
|
|
||||||
|
linters-settings:
|
||||||
|
goimports:
|
||||||
|
local-prefixes: sigs.k8s.io/descheduler
|
||||||
28
.travis.yml
28
.travis.yml
@@ -1,7 +1,31 @@
|
|||||||
|
sudo: false
|
||||||
|
|
||||||
language: go
|
language: go
|
||||||
|
|
||||||
go:
|
go:
|
||||||
- 1.8.3
|
- 1.13.x
|
||||||
|
env:
|
||||||
|
- K8S_VERSION=v1.17.0
|
||||||
|
- K8S_VERSION=v1.16.4
|
||||||
|
- K8S_VERSION=v1.15.7
|
||||||
|
services:
|
||||||
|
- docker
|
||||||
|
before_script:
|
||||||
|
- curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
|
||||||
|
- wget https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-linux-amd64
|
||||||
|
- chmod +x kind-linux-amd64
|
||||||
|
- mv kind-linux-amd64 kind
|
||||||
|
- export PATH=$PATH:$PWD
|
||||||
|
- kind create cluster --image kindest/node:${K8S_VERSION} --config=$TRAVIS_BUILD_DIR/hack/kind_config.yaml
|
||||||
|
- export KUBECONFIG="$(kind get kubeconfig-path)"
|
||||||
|
- docker pull kubernetes/pause
|
||||||
|
- kind load docker-image kubernetes/pause
|
||||||
|
- kind get kubeconfig > /tmp/admin.conf
|
||||||
script:
|
script:
|
||||||
|
- mkdir -p ~/gopath/src/sigs.k8s.io/
|
||||||
|
- mv ~/gopath/src/github.com/kubernetes-sigs/descheduler ~/gopath/src/sigs.k8s.io/.
|
||||||
- hack/verify-gofmt.sh
|
- hack/verify-gofmt.sh
|
||||||
|
- make lint
|
||||||
- make build
|
- make build
|
||||||
- make test
|
- make test-unit
|
||||||
|
- make test-e2e
|
||||||
|
|||||||
23
CONTRIBUTING.md
Normal file
23
CONTRIBUTING.md
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
# Contributing Guidelines
|
||||||
|
|
||||||
|
Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
|
||||||
|
|
||||||
|
_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
We have full documentation on how to get started contributing here:
|
||||||
|
|
||||||
|
- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
|
||||||
|
- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
|
||||||
|
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
|
||||||
|
|
||||||
|
## Mentorship
|
||||||
|
|
||||||
|
- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
|
||||||
|
|
||||||
|
|
||||||
|
## Contact Information
|
||||||
|
|
||||||
|
- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
|
||||||
|
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)
|
||||||
12
Dockerfile
12
Dockerfile
@@ -11,10 +11,16 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
FROM golang:1.13.6
|
||||||
|
|
||||||
FROM fedora
|
WORKDIR /go/src/sigs.k8s.io/descheduler
|
||||||
|
COPY . .
|
||||||
|
RUN make
|
||||||
|
|
||||||
|
FROM scratch
|
||||||
|
|
||||||
MAINTAINER Avesh Agarwal <avagarwa@redhat.com>
|
MAINTAINER Avesh Agarwal <avagarwa@redhat.com>
|
||||||
|
|
||||||
COPY _output/bin/descheduler /bin/descheduler
|
COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler
|
||||||
CMD ["/bin/descheduler --help"]
|
|
||||||
|
CMD ["/bin/descheduler", "--help"]
|
||||||
|
|||||||
20
Dockerfile.dev
Normal file
20
Dockerfile.dev
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
# Copyright 2017 The Kubernetes Authors.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
FROM scratch
|
||||||
|
|
||||||
|
MAINTAINER Avesh Agarwal <avagarwa@redhat.com>
|
||||||
|
|
||||||
|
COPY _output/bin/descheduler /bin/descheduler
|
||||||
|
|
||||||
|
CMD ["/bin/descheduler", "--help"]
|
||||||
54
Makefile
54
Makefile
@@ -15,21 +15,67 @@
|
|||||||
.PHONY: test
|
.PHONY: test
|
||||||
|
|
||||||
# VERSION is currently based on the last commit
|
# VERSION is currently based on the last commit
|
||||||
VERSION:=$(shell git rev-parse --short HEAD)
|
VERSION?=$(shell git describe --tags)
|
||||||
|
COMMIT=$(shell git rev-parse HEAD)
|
||||||
|
BUILD=$(shell date +%FT%T%z)
|
||||||
|
LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app
|
||||||
|
|
||||||
|
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"
|
||||||
|
|
||||||
|
GOLANGCI_VERSION := v1.15.0
|
||||||
|
HAS_GOLANGCI := $(shell which golangci-lint)
|
||||||
|
|
||||||
|
# REGISTRY is the container registry to push
|
||||||
|
# into. The default is to push to the staging
|
||||||
|
# registry, not production.
|
||||||
|
REGISTRY?=gcr.io/k8s-staging-descheduler
|
||||||
|
|
||||||
# IMAGE is the image name of descheduler
|
# IMAGE is the image name of descheduler
|
||||||
IMAGE:=descheduler:$(VERSION)
|
IMAGE:=descheduler:$(VERSION)
|
||||||
|
|
||||||
|
# IMAGE_GCLOUD is the image name of descheduler in the remote registry
|
||||||
|
IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
|
||||||
|
|
||||||
|
# TODO: upload binaries to GCS bucket
|
||||||
|
#
|
||||||
|
# In the future binaries can be uploaded to
|
||||||
|
# GCS bucket gs://k8s-staging-descheduler.
|
||||||
|
|
||||||
all: build
|
all: build
|
||||||
|
|
||||||
build:
|
build:
|
||||||
go build -o _output/bin/descheduler github.com/kubernetes-incubator/descheduler/cmd/descheduler
|
CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
|
||||||
|
|
||||||
image: build
|
dev-image: build
|
||||||
|
docker build -f Dockerfile.dev -t $(IMAGE) .
|
||||||
|
|
||||||
|
image:
|
||||||
docker build -t $(IMAGE) .
|
docker build -t $(IMAGE) .
|
||||||
|
|
||||||
|
push-container-to-gcloud: image
|
||||||
|
gcloud auth configure-docker
|
||||||
|
docker tag $(IMAGE) $(IMAGE_GCLOUD)
|
||||||
|
docker push $(IMAGE_GCLOUD)
|
||||||
|
|
||||||
|
push: push-container-to-gcloud
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf _output
|
rm -rf _output
|
||||||
|
|
||||||
test:
|
test-unit:
|
||||||
./test/run-unit-tests.sh
|
./test/run-unit-tests.sh
|
||||||
|
|
||||||
|
test-e2e:
|
||||||
|
./test/run-e2e-tests.sh
|
||||||
|
|
||||||
|
gen:
|
||||||
|
./hack/update-generated-conversions.sh
|
||||||
|
./hack/update-generated-deep-copies.sh
|
||||||
|
./hack/update-generated-defaulters.sh
|
||||||
|
#undo go mod changes caused by above.
|
||||||
|
go mod tidy
|
||||||
|
lint:
|
||||||
|
ifndef HAS_GOLANGCI
|
||||||
|
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin ${GOLANGCI_VERSION}
|
||||||
|
endif
|
||||||
|
golangci-lint run
|
||||||
|
|||||||
6
OWNERS
6
OWNERS
@@ -1,8 +1,10 @@
|
|||||||
approvers:
|
approvers:
|
||||||
- aveshagarwal
|
- aveshagarwal
|
||||||
|
- k82cn
|
||||||
- ravisantoshgudimetla
|
- ravisantoshgudimetla
|
||||||
- jayunit100
|
|
||||||
reviewers:
|
reviewers:
|
||||||
- aveshagarwal
|
- aveshagarwal
|
||||||
|
- k82cn
|
||||||
- ravisantoshgudimetla
|
- ravisantoshgudimetla
|
||||||
- jayunit100
|
- damemi
|
||||||
|
- seanmalloy
|
||||||
|
|||||||
186
README.md
186
README.md
@@ -1,3 +1,6 @@
|
|||||||
|
[](https://travis-ci.org/kubernetes-sigs/descheduler)
|
||||||
|
[](https://goreportcard.com/report/sigs.k8s.io/descheduler)
|
||||||
|
|
||||||
# Descheduler for Kubernetes
|
# Descheduler for Kubernetes
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
@@ -23,6 +26,8 @@ but relies on the default scheduler for that.
|
|||||||
|
|
||||||
## Build and Run
|
## Build and Run
|
||||||
|
|
||||||
|
- Checkout the repo into your $GOPATH directory under src/sigs.k8s.io/descheduler
|
||||||
|
|
||||||
Build descheduler:
|
Build descheduler:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
@@ -35,118 +40,52 @@ and run descheduler:
|
|||||||
$ ./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file>
|
$ ./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
If you want more information about what descheduler is doing add `-v 1` to the command line
|
||||||
|
|
||||||
For more information about available options run:
|
For more information about available options run:
|
||||||
```
|
```
|
||||||
$ ./_output/bin/descheduler --help
|
$ ./_output/bin/descheduler --help
|
||||||
```
|
```
|
||||||
|
|
||||||
## Running Descheduler as a Job Inside of a Pod
|
## Running Descheduler as a Job or CronJob
|
||||||
|
|
||||||
Descheduler can be run as a job inside of a pod. It has the advantage of
|
The descheduler can be run as a job or cronjob inside of a pod. It has the advantage of
|
||||||
being able to be run multiple times without needing user intervention.
|
being able to be run multiple times without needing user intervention.
|
||||||
Descheduler pod is run as a critical pod to avoid being evicted by itself,
|
The descheduler pod is run as a critical pod to avoid being evicted by itself,
|
||||||
or by kubelet due to an eviction event. Since critical pods are created in
|
or by the kubelet due to an eviction event. Since critical pods are created in the
|
||||||
`kube-system` namespace, descheduler job and its pod will also be created
|
`kube-system` namespace, the descheduler job and its pod will also be created
|
||||||
in `kube-system` namespace.
|
in `kube-system` namespace.
|
||||||
|
|
||||||
### Create a container image
|
### Setup RBAC
|
||||||
|
|
||||||
First we create a simple Docker image utilizing the Dockerfile found in the root directory:
|
To give necessary permissions for the descheduler to work in a pod.
|
||||||
|
|
||||||
```
|
```
|
||||||
$ make image
|
$ kubectl create -f kubernetes/rbac.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### Create a cluster role
|
|
||||||
|
|
||||||
To give necessary permissions for the descheduler to work in a pod, create a cluster role:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ cat << EOF| kubectl create -f -
|
|
||||||
kind: ClusterRole
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
|
||||||
metadata:
|
|
||||||
name: descheduler-cluster-role
|
|
||||||
rules:
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["nodes"]
|
|
||||||
verbs: ["get", "watch", "list"]
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["pods"]
|
|
||||||
verbs: ["get", "watch", "list", "delete"]
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
### Create the service account which will be used to run the job:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ kubectl create sa descheduler-sa -n kube-system
|
|
||||||
```
|
|
||||||
|
|
||||||
### Bind the cluster role to the service account:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ kubectl create clusterrolebinding descheduler-cluster-role-binding \
|
|
||||||
--clusterrole=descheduler-cluster-role \
|
|
||||||
--serviceaccount=kube-system:descheduler-sa
|
|
||||||
```
|
|
||||||
### Create a configmap to store descheduler policy
|
### Create a configmap to store descheduler policy
|
||||||
|
|
||||||
Descheduler policy is created as a ConfigMap in `kube-system` namespace
|
|
||||||
so that it can be mounted as a volume inside pod.
|
|
||||||
|
|
||||||
```
|
```
|
||||||
$ kubectl create configmap descheduler-policy-configmap \
|
$ kubectl create -f kubernetes/configmap.yaml
|
||||||
-n kube-system --from-file=<path-to-policy-dir/policy.yaml>
|
|
||||||
```
|
|
||||||
### Create the job specification (descheduler-job.yaml)
|
|
||||||
|
|
||||||
```
|
|
||||||
apiVersion: batch/v1
|
|
||||||
kind: Job
|
|
||||||
metadata:
|
|
||||||
name: descheduler-job
|
|
||||||
namespace: kube-system
|
|
||||||
spec:
|
|
||||||
parallelism: 1
|
|
||||||
completions: 1
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
name: descheduler-pod
|
|
||||||
annotations:
|
|
||||||
scheduler.alpha.kubernetes.io/critical-pod: "true"
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: descheduler
|
|
||||||
image: descheduler
|
|
||||||
volumeMounts:
|
|
||||||
- mountPath: /policy-dir
|
|
||||||
name: policy-volume
|
|
||||||
command:
|
|
||||||
- "/bin/sh"
|
|
||||||
- "-ec"
|
|
||||||
- |
|
|
||||||
/bin/descheduler --policy-config-file /policy-dir/policy.yaml
|
|
||||||
restartPolicy: "Never"
|
|
||||||
serviceAccountName: descheduler-sa
|
|
||||||
volumes:
|
|
||||||
- name: policy-volume
|
|
||||||
configMap:
|
|
||||||
name: descheduler-policy-configmap
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Please note that the pod template is configured with critical pod annotation, and
|
### Create a Job or CronJob
|
||||||
the policy `policy-file` is mounted as a volume from the config map.
|
|
||||||
|
|
||||||
### Run the descheduler as a job in a pod:
|
As a Job.
|
||||||
```
|
```
|
||||||
$ kubectl create -f descheduler-job.yaml
|
$ kubectl create -f kubernetes/job.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
Or as a CronJob.
|
||||||
|
```
|
||||||
|
$ kubectl create -f kubernetes/cronjob.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
## Policy and Strategies
|
## Policy and Strategies
|
||||||
|
|
||||||
Descheduler's policy is configurable and includes strategies to be enabled or disabled.
|
Descheduler's policy is configurable and includes strategies to be enabled or disabled.
|
||||||
Two strategies, `RemoveDuplicates` and `LowNodeUtilization` are currently implemented.
|
Five strategies, `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`, `RemovePodsViolatingNodeAffinity` , `RemovePodsViolatingNodeTaints` are currently implemented.
|
||||||
As part of the policy, the parameters associated with the strategies can be configured too.
|
As part of the policy, the parameters associated with the strategies can be configured too.
|
||||||
By default, all strategies are enabled.
|
By default, all strategies are enabled.
|
||||||
|
|
||||||
@@ -158,7 +97,7 @@ those duplicate pods are evicted for better spreading of pods in a cluster. This
|
|||||||
if some nodes went down due to whatever reasons, and pods on them were moved to other nodes leading to
|
if some nodes went down due to whatever reasons, and pods on them were moved to other nodes leading to
|
||||||
more than one pod associated with RS or RC, for example, running on same node. Once the failed nodes
|
more than one pod associated with RS or RC, for example, running on same node. Once the failed nodes
|
||||||
are ready again, this strategy could be enabled to evict those duplicate pods. Currently, there are no
|
are ready again, this strategy could be enabled to evict those duplicate pods. Currently, there are no
|
||||||
parameters associated with this strategy. To disable this strategy, the policy would look like:
|
parameters associated with this strategy. To disable this strategy, the policy should look like:
|
||||||
|
|
||||||
```
|
```
|
||||||
apiVersion: "descheduler/v1alpha1"
|
apiVersion: "descheduler/v1alpha1"
|
||||||
@@ -210,16 +149,56 @@ This parameter can be configured to activate the strategy only when number of un
|
|||||||
are above the configured value. This could be helpful in large clusters where a few nodes could go
|
are above the configured value. This could be helpful in large clusters where a few nodes could go
|
||||||
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
|
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
|
||||||
|
|
||||||
|
### RemovePodsViolatingInterPodAntiAffinity
|
||||||
|
|
||||||
|
This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example, if there is podA on node and podB and podC(running on same node) have antiaffinity rules which prohibit them to run on the same node, then podA will be evicted from the node so that podB and podC could run. This issue could happen, when the anti-affinity rules for pods B,C are created when they are already running on node. Currently, there are no parameters associated with this strategy. To disable this strategy, the policy should look like:
|
||||||
|
|
||||||
|
```
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"RemovePodsViolatingInterPodAntiAffinity":
|
||||||
|
enabled: false
|
||||||
|
```
|
||||||
|
|
||||||
|
### RemovePodsViolatingNodeAffinity
|
||||||
|
|
||||||
|
This strategy makes sure that pods violating node affinity are removed from nodes. For example, there is podA that was scheduled on nodeA which satisfied the node affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time of scheduling, but over time nodeA no longer satisfies the rule, then if another node nodeB is available that satisfies the node affinity rule, then podA will be evicted from nodeA. The policy file should like this -
|
||||||
|
|
||||||
|
```
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"RemovePodsViolatingNodeAffinity":
|
||||||
|
enabled: true
|
||||||
|
params:
|
||||||
|
nodeAffinityType:
|
||||||
|
- "requiredDuringSchedulingIgnoredDuringExecution"
|
||||||
|
```
|
||||||
|
### RemovePodsViolatingNodeTaints
|
||||||
|
|
||||||
|
This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example: there is a pod "podA" with toleration to tolerate a taint ``key=value:NoSchedule`` scheduled and running on the tainted node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations and will be evicted. The policy file should look like:
|
||||||
|
|
||||||
|
````
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"RemovePodsViolatingNodeTaints":
|
||||||
|
enabled: true
|
||||||
|
````
|
||||||
## Pod Evictions
|
## Pod Evictions
|
||||||
|
|
||||||
When the descheduler decides to evict pods from a node, it employs following general mechanism:
|
When the descheduler decides to evict pods from a node, it employs following general mechanism:
|
||||||
|
|
||||||
* Critical pods (with annotations scheduler.alpha.kubernetes.io/critical-pod) are never evicted.
|
* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted.
|
||||||
* Pods (static or mirrored pods or stand alone pods) not part of an RC, RS, Deployment or Jobs are
|
* Pods (static or mirrored pods or stand alone pods) not part of an RC, RS, Deployment or Jobs are
|
||||||
never evicted because these pods won't be recreated.
|
never evicted because these pods won't be recreated.
|
||||||
* Pods associated with DaemonSets are never evicted.
|
* Pods associated with DaemonSets are never evicted.
|
||||||
* Pods with local storage are never evicted.
|
* Pods with local storage are never evicted.
|
||||||
* Best efforts pods are evicted before Burstable and Guaranteed pods.
|
* Best efforts pods are evicted before Burstable and Guaranteed pods.
|
||||||
|
* All types of pods with annotation descheduler.alpha.kubernetes.io/evict are evicted. This
|
||||||
|
annotation is used to override checks which prevent eviction and user can select which pod is evicted.
|
||||||
|
User should know how and if the pod will be recreated.
|
||||||
|
|
||||||
### Pod disruption Budget (PDB)
|
### Pod disruption Budget (PDB)
|
||||||
Pods subject to Pod Disruption Budget (PDB) are not evicted if descheduling violates its pod
|
Pods subject to Pod Disruption Budget (PDB) are not evicted if descheduling violates its pod
|
||||||
@@ -229,10 +208,7 @@ disruption budget (PDB). The pods are evicted by using eviction subresource to h
|
|||||||
|
|
||||||
This roadmap is not in any particular order.
|
This roadmap is not in any particular order.
|
||||||
|
|
||||||
* Addition of test cases (unit and end-to-end)
|
* Consideration of pod affinity
|
||||||
* Ability to run inside a pod as a job
|
|
||||||
* Strategy to consider taints and tolerations
|
|
||||||
* Consideration of pod affinity and anti-affinity
|
|
||||||
* Strategy to consider pod life time
|
* Strategy to consider pod life time
|
||||||
* Strategy to consider number of pending pods
|
* Strategy to consider number of pending pods
|
||||||
* Integration with cluster autoscaler
|
* Integration with cluster autoscaler
|
||||||
@@ -240,8 +216,22 @@ This roadmap is not in any particular order.
|
|||||||
* Consideration of Kubernetes's scheduler's predicates
|
* Consideration of Kubernetes's scheduler's predicates
|
||||||
|
|
||||||
|
|
||||||
## Note
|
## Compatibility matrix
|
||||||
|
|
||||||
This project is under active development, and is not intended for production use.
|
Descheduler | supported Kubernetes version
|
||||||
Any api could be changed any time with out any notice. That said, your feedback is
|
-------------|-----------------------------
|
||||||
very important and appreciated to make this project more stable and useful.
|
0.4+ | 1.9+
|
||||||
|
0.1-0.3 | 1.7-1.8
|
||||||
|
|
||||||
|
## Community, discussion, contribution, and support
|
||||||
|
|
||||||
|
Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
|
||||||
|
|
||||||
|
You can reach the maintainers of this project at:
|
||||||
|
|
||||||
|
- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
|
||||||
|
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)
|
||||||
|
|
||||||
|
### Code of conduct
|
||||||
|
|
||||||
|
Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
|
||||||
|
|||||||
15
SECURITY_CONTACTS
Normal file
15
SECURITY_CONTACTS
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Defined below are the security contacts for this repo.
|
||||||
|
#
|
||||||
|
# They are the contact point for the Product Security Team to reach out
|
||||||
|
# to for triaging and handling of incoming issues.
|
||||||
|
#
|
||||||
|
# The below names agree to abide by the
|
||||||
|
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
|
||||||
|
# and will be removed and replaced if they violate that agreement.
|
||||||
|
#
|
||||||
|
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
||||||
|
# INSTRUCTIONS AT https://kubernetes.io/security/
|
||||||
|
|
||||||
|
aveshagarwal
|
||||||
|
k82cn
|
||||||
|
ravisantoshgudimetla
|
||||||
24
cloudbuild.yaml
Normal file
24
cloudbuild.yaml
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
# See https://cloud.google.com/cloud-build/docs/build-config
|
||||||
|
|
||||||
|
# this must be specified in seconds. If omitted, defaults to 600s (10 mins)
|
||||||
|
timeout: 1200s
|
||||||
|
# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
|
||||||
|
# or any new substitutions added in the future.
|
||||||
|
options:
|
||||||
|
substitution_option: ALLOW_LOOSE
|
||||||
|
steps:
|
||||||
|
- name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4'
|
||||||
|
entrypoint: make
|
||||||
|
env:
|
||||||
|
- DOCKER_CLI_EXPERIMENTAL=enabled
|
||||||
|
- VERSION=$_GIT_TAG
|
||||||
|
- BASE_REF=$_PULL_BASE_REF
|
||||||
|
args:
|
||||||
|
- push
|
||||||
|
substitutions:
|
||||||
|
# _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
|
||||||
|
# can be used as a substitution
|
||||||
|
_GIT_TAG: '12345'
|
||||||
|
# _PULL_BASE_REF will contain the ref that was pushed to to trigger this build -
|
||||||
|
# a branch like 'master' or 'release-0.2', or a tag like 'v0.2'.
|
||||||
|
_PULL_BASE_REF: 'master'
|
||||||
@@ -18,13 +18,12 @@ limitations under the License.
|
|||||||
package options
|
package options
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
|
||||||
// install the componentconfig api so we get its defaulting and conversion functions
|
// install the componentconfig api so we get its defaulting and conversion functions
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
|
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||||
_ "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/install"
|
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
|
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
|
||||||
deschedulerscheme "github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
|
|
||||||
|
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
@@ -49,8 +48,14 @@ func NewDeschedulerServer() *DeschedulerServer {
|
|||||||
|
|
||||||
// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
|
// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
|
||||||
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
|
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
|
||||||
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "time interval between two consecutive descheduler executions")
|
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
|
||||||
fs.StringVar(&rs.KubeconfigFile, "kubeconfig-file", rs.KubeconfigFile, "File with kube configuration.")
|
fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
|
||||||
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
|
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
|
||||||
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
|
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
|
||||||
|
// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
|
||||||
|
fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
|
||||||
|
// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
|
||||||
|
fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
|
||||||
|
// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
|
||||||
|
fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "Enables evicting pods using local storage by descheduler")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,16 +19,16 @@ package app
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/descheduler"
|
"sigs.k8s.io/descheduler/pkg/descheduler"
|
||||||
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
aflag "k8s.io/apiserver/pkg/util/flag"
|
aflag "k8s.io/component-base/cli/flag"
|
||||||
"k8s.io/apiserver/pkg/util/logs"
|
"k8s.io/component-base/logs"
|
||||||
|
"k8s.io/klog"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewDeschedulerCommand creates a *cobra.Command object with default parameters
|
// NewDeschedulerCommand creates a *cobra.Command object with default parameters
|
||||||
@@ -43,7 +43,7 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
|
|||||||
defer logs.FlushLogs()
|
defer logs.FlushLogs()
|
||||||
err := Run(s)
|
err := Run(s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
klog.Errorf("%v", err)
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
87
cmd/descheduler/app/version.go
Normal file
87
cmd/descheduler/app/version.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// gitCommit is a constant representing the source version that
|
||||||
|
// generated this build. It should be set during build via -ldflags.
|
||||||
|
gitCommit string
|
||||||
|
// version is a constant representing the version tag that
|
||||||
|
// generated this build. It should be set during build via -ldflags.
|
||||||
|
version string
|
||||||
|
// buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||||
|
//It should be set during build via -ldflags.
|
||||||
|
buildDate string
|
||||||
|
)
|
||||||
|
|
||||||
|
// Info holds the information related to descheduler app version.
|
||||||
|
type Info struct {
|
||||||
|
Major string `json:"major"`
|
||||||
|
Minor string `json:"minor"`
|
||||||
|
GitCommit string `json:"gitCommit"`
|
||||||
|
GitVersion string `json:"gitVersion"`
|
||||||
|
BuildDate string `json:"buildDate"`
|
||||||
|
GoVersion string `json:"goVersion"`
|
||||||
|
Compiler string `json:"compiler"`
|
||||||
|
Platform string `json:"platform"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the overall codebase version. It's for detecting
|
||||||
|
// what code a binary was built from.
|
||||||
|
func Get() Info {
|
||||||
|
majorVersion, minorVersion := splitVersion(version)
|
||||||
|
return Info{
|
||||||
|
Major: majorVersion,
|
||||||
|
Minor: minorVersion,
|
||||||
|
GitCommit: gitCommit,
|
||||||
|
GitVersion: version,
|
||||||
|
BuildDate: buildDate,
|
||||||
|
GoVersion: runtime.Version(),
|
||||||
|
Compiler: runtime.Compiler,
|
||||||
|
Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewVersionCommand() *cobra.Command {
|
||||||
|
var versionCmd = &cobra.Command{
|
||||||
|
Use: "version",
|
||||||
|
Short: "Version of descheduler",
|
||||||
|
Long: `Prints the version of descheduler.`,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("Descheduler version %+v\n", Get())
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return versionCmd
|
||||||
|
}
|
||||||
|
|
||||||
|
// splitVersion splits the git version to generate major and minor versions needed.
|
||||||
|
func splitVersion(version string) (string, string) {
|
||||||
|
if version == "" {
|
||||||
|
return "", ""
|
||||||
|
}
|
||||||
|
// A sample version would be of form v0.1.0-7-ge884046, so split at first '.' and
|
||||||
|
// then return 0 and 1+(+ appended to follow semver convention) for major and minor versions.
|
||||||
|
return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
|
||||||
|
}
|
||||||
@@ -17,14 +17,18 @@ limitations under the License.
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app"
|
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"sigs.k8s.io/descheduler/cmd/descheduler/app"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
out := os.Stdout
|
out := os.Stdout
|
||||||
cmd := app.NewDeschedulerCommand(out)
|
cmd := app.NewDeschedulerCommand(out)
|
||||||
|
cmd.AddCommand(app.NewVersionCommand())
|
||||||
|
flag.CommandLine.Parse([]string{})
|
||||||
if err := cmd.Execute(); err != nil {
|
if err := cmd.Execute(); err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
|
|||||||
@@ -1,58 +1,3 @@
|
|||||||
## Kubernetes Community Code of Conduct
|
# Kubernetes Community Code of Conduct
|
||||||
|
|
||||||
### Contributor Code of Conduct
|
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
|
||||||
|
|
||||||
As contributors and maintainers of this project, and in the interest of fostering
|
|
||||||
an open and welcoming community, we pledge to respect all people who contribute
|
|
||||||
through reporting issues, posting feature requests, updating documentation,
|
|
||||||
submitting pull requests or patches, and other activities.
|
|
||||||
|
|
||||||
We are committed to making participation in this project a harassment-free experience for
|
|
||||||
everyone, regardless of level of experience, gender, gender identity and expression,
|
|
||||||
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
|
|
||||||
religion, or nationality.
|
|
||||||
|
|
||||||
Examples of unacceptable behavior by participants include:
|
|
||||||
|
|
||||||
* The use of sexualized language or imagery
|
|
||||||
* Personal attacks
|
|
||||||
* Trolling or insulting/derogatory comments
|
|
||||||
* Public or private harassment
|
|
||||||
* Publishing other's private information, such as physical or electronic addresses,
|
|
||||||
without explicit permission
|
|
||||||
* Other unethical or unprofessional conduct.
|
|
||||||
|
|
||||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
|
||||||
comments, commits, code, wiki edits, issues, and other contributions that are not
|
|
||||||
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
|
|
||||||
commit themselves to fairly and consistently applying these principles to every aspect
|
|
||||||
of managing this project. Project maintainers who do not follow or enforce the Code of
|
|
||||||
Conduct may be permanently removed from the project team.
|
|
||||||
|
|
||||||
This code of conduct applies both within project spaces and in public spaces
|
|
||||||
when an individual is representing the project or its community.
|
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the Contributor Covenant
|
|
||||||
(http://contributor-covenant.org), version 1.2.0, available at
|
|
||||||
http://contributor-covenant.org/version/1/2/0/
|
|
||||||
|
|
||||||
### Kubernetes Events Code of Conduct
|
|
||||||
|
|
||||||
Kubernetes events are working conferences intended for professional networking and collaboration in the
|
|
||||||
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
|
|
||||||
with their employer's policies on appropriate workplace behavior.
|
|
||||||
|
|
||||||
While at Kubernetes events or related social networking opportunities, attendees should not engage in
|
|
||||||
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
|
|
||||||
be especially aware of these concerns.
|
|
||||||
|
|
||||||
The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
|
|
||||||
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
|
|
||||||
be engaging in discriminatory or offensive speech or actions.
|
|
||||||
|
|
||||||
Please bring any concerns to to the immediate attention of Kubernetes event staff
|
|
||||||
|
|
||||||
|
|
||||||
[]()
|
|
||||||
|
|||||||
21
docs/releasing.md
Normal file
21
docs/releasing.md
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
# Release process
|
||||||
|
|
||||||
|
## Semi-automatic
|
||||||
|
|
||||||
|
1. Make sure your repo is clean by git's standards
|
||||||
|
2. Tag the repository and push the tag `VERSION=v0.10.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
|
||||||
|
3. Publish a draft release using the tag you just created
|
||||||
|
4. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
|
||||||
|
5. Publish release
|
||||||
|
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
|
||||||
|
|
||||||
|
## Manual
|
||||||
|
|
||||||
|
1. Make sure your repo is clean by git's standards
|
||||||
|
2. Tag the repository and push the tag `VERSION=v0.10.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
|
||||||
|
3. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
|
||||||
|
4. Build and push the container image to the staging registry `VERSION=$VERSION make push`
|
||||||
|
5. Publish a draft release using the tag you just created
|
||||||
|
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
|
||||||
|
7. Publish release
|
||||||
|
8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
|
||||||
8
examples/node-affinity.yml
Normal file
8
examples/node-affinity.yml
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"RemovePodsViolatingNodeAffinity":
|
||||||
|
enabled: true
|
||||||
|
params:
|
||||||
|
nodeAffinityType:
|
||||||
|
- "requiredDuringSchedulingIgnoredDuringExecution"
|
||||||
@@ -3,6 +3,8 @@ kind: "DeschedulerPolicy"
|
|||||||
strategies:
|
strategies:
|
||||||
"RemoveDuplicates":
|
"RemoveDuplicates":
|
||||||
enabled: true
|
enabled: true
|
||||||
|
"RemovePodsViolatingInterPodAntiAffinity":
|
||||||
|
enabled: true
|
||||||
"LowNodeUtilization":
|
"LowNodeUtilization":
|
||||||
enabled: true
|
enabled: true
|
||||||
params:
|
params:
|
||||||
|
|||||||
328
glide.lock
generated
328
glide.lock
generated
@@ -1,328 +0,0 @@
|
|||||||
hash: 6ccf8e8213eb31f9dd31b46c3aa3c2c01929c6230fb049cfabcabd498ade9c30
|
|
||||||
updated: 2017-10-16T14:31:20.353977552-04:00
|
|
||||||
imports:
|
|
||||||
- name: github.com/davecgh/go-spew
|
|
||||||
version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
|
|
||||||
subpackages:
|
|
||||||
- spew
|
|
||||||
- name: github.com/docker/distribution
|
|
||||||
version: cd27f179f2c10c5d300e6d09025b538c475b0d51
|
|
||||||
subpackages:
|
|
||||||
- digest
|
|
||||||
- reference
|
|
||||||
- name: github.com/emicklei/go-restful
|
|
||||||
version: ff4f55a206334ef123e4f79bbf348980da81ca46
|
|
||||||
subpackages:
|
|
||||||
- log
|
|
||||||
- name: github.com/emicklei/go-restful-swagger12
|
|
||||||
version: dcef7f55730566d41eae5db10e7d6981829720f6
|
|
||||||
- name: github.com/ghodss/yaml
|
|
||||||
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
|
|
||||||
- name: github.com/go-openapi/analysis
|
|
||||||
version: b44dc874b601d9e4e2f6e19140e794ba24bead3b
|
|
||||||
- name: github.com/go-openapi/jsonpointer
|
|
||||||
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
|
|
||||||
- name: github.com/go-openapi/jsonreference
|
|
||||||
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
|
|
||||||
- name: github.com/go-openapi/loads
|
|
||||||
version: 18441dfa706d924a39a030ee2c3b1d8d81917b38
|
|
||||||
- name: github.com/go-openapi/spec
|
|
||||||
version: 6aced65f8501fe1217321abf0749d354824ba2ff
|
|
||||||
- name: github.com/go-openapi/swag
|
|
||||||
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
|
|
||||||
- name: github.com/gogo/protobuf
|
|
||||||
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
|
|
||||||
subpackages:
|
|
||||||
- proto
|
|
||||||
- sortkeys
|
|
||||||
- name: github.com/golang/glog
|
|
||||||
version: 44145f04b68cf362d9c4df2182967c2275eaefed
|
|
||||||
- name: github.com/google/gofuzz
|
|
||||||
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
|
|
||||||
- name: github.com/hashicorp/golang-lru
|
|
||||||
version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
|
|
||||||
subpackages:
|
|
||||||
- simplelru
|
|
||||||
- name: github.com/howeyc/gopass
|
|
||||||
version: bf9dde6d0d2c004a008c27aaee91170c786f6db8
|
|
||||||
- name: github.com/imdario/mergo
|
|
||||||
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
|
|
||||||
- name: github.com/inconshreveable/mousetrap
|
|
||||||
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
|
||||||
- name: github.com/juju/ratelimit
|
|
||||||
version: 5b9ff866471762aa2ab2dced63c9fb6f53921342
|
|
||||||
- name: github.com/kubernetes/repo-infra
|
|
||||||
version: f521b5d472e00e05da5394994942064510a6e8bf
|
|
||||||
- name: github.com/mailru/easyjson
|
|
||||||
version: d5b7844b561a7bc640052f1b935f7b800330d7e0
|
|
||||||
subpackages:
|
|
||||||
- buffer
|
|
||||||
- jlexer
|
|
||||||
- jwriter
|
|
||||||
- name: github.com/PuerkitoBio/purell
|
|
||||||
version: 8a290539e2e8629dbc4e6bad948158f790ec31f4
|
|
||||||
- name: github.com/PuerkitoBio/urlesc
|
|
||||||
version: 5bd2802263f21d8788851d5305584c82a5c75d7e
|
|
||||||
- name: github.com/spf13/cobra
|
|
||||||
version: f62e98d28ab7ad31d707ba837a966378465c7b57
|
|
||||||
- name: github.com/spf13/pflag
|
|
||||||
version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
|
|
||||||
- name: github.com/ugorji/go
|
|
||||||
version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
|
|
||||||
subpackages:
|
|
||||||
- codec
|
|
||||||
- name: golang.org/x/crypto
|
|
||||||
version: d172538b2cfce0c13cee31e647d0367aa8cd2486
|
|
||||||
subpackages:
|
|
||||||
- bcrypt
|
|
||||||
- blowfish
|
|
||||||
- nacl/secretbox
|
|
||||||
- poly1305
|
|
||||||
- salsa20/salsa
|
|
||||||
- ssh/terminal
|
|
||||||
- name: golang.org/x/net
|
|
||||||
version: f2499483f923065a842d38eb4c7f1927e6fc6e6d
|
|
||||||
subpackages:
|
|
||||||
- context
|
|
||||||
- html
|
|
||||||
- html/atom
|
|
||||||
- http2
|
|
||||||
- http2/hpack
|
|
||||||
- idna
|
|
||||||
- internal/timeseries
|
|
||||||
- lex/httplex
|
|
||||||
- trace
|
|
||||||
- websocket
|
|
||||||
- name: golang.org/x/sys
|
|
||||||
version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
|
|
||||||
subpackages:
|
|
||||||
- unix
|
|
||||||
- name: golang.org/x/text
|
|
||||||
version: 2910a502d2bf9e43193af9d68ca516529614eed3
|
|
||||||
subpackages:
|
|
||||||
- cases
|
|
||||||
- internal/tag
|
|
||||||
- language
|
|
||||||
- runes
|
|
||||||
- secure/bidirule
|
|
||||||
- secure/precis
|
|
||||||
- transform
|
|
||||||
- unicode/bidi
|
|
||||||
- unicode/norm
|
|
||||||
- width
|
|
||||||
- name: gopkg.in/inf.v0
|
|
||||||
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
|
||||||
- name: gopkg.in/yaml.v2
|
|
||||||
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
|
|
||||||
- name: k8s.io/apimachinery
|
|
||||||
version: 917740426ad66ff818da4809990480bcc0786a77
|
|
||||||
subpackages:
|
|
||||||
- pkg/api/equality
|
|
||||||
- pkg/api/errors
|
|
||||||
- pkg/api/meta
|
|
||||||
- pkg/api/resource
|
|
||||||
- pkg/apimachinery
|
|
||||||
- pkg/apimachinery/announced
|
|
||||||
- pkg/apimachinery/registered
|
|
||||||
- pkg/apis/meta/v1
|
|
||||||
- pkg/apis/meta/v1/unstructured
|
|
||||||
- pkg/apis/meta/v1alpha1
|
|
||||||
- pkg/conversion
|
|
||||||
- pkg/conversion/queryparams
|
|
||||||
- pkg/conversion/unstructured
|
|
||||||
- pkg/fields
|
|
||||||
- pkg/labels
|
|
||||||
- pkg/openapi
|
|
||||||
- pkg/runtime
|
|
||||||
- pkg/runtime/schema
|
|
||||||
- pkg/runtime/serializer
|
|
||||||
- pkg/runtime/serializer/json
|
|
||||||
- pkg/runtime/serializer/protobuf
|
|
||||||
- pkg/runtime/serializer/recognizer
|
|
||||||
- pkg/runtime/serializer/streaming
|
|
||||||
- pkg/runtime/serializer/versioning
|
|
||||||
- pkg/selection
|
|
||||||
- pkg/types
|
|
||||||
- pkg/util/cache
|
|
||||||
- pkg/util/clock
|
|
||||||
- pkg/util/diff
|
|
||||||
- pkg/util/errors
|
|
||||||
- pkg/util/framer
|
|
||||||
- pkg/util/intstr
|
|
||||||
- pkg/util/json
|
|
||||||
- pkg/util/net
|
|
||||||
- pkg/util/rand
|
|
||||||
- pkg/util/runtime
|
|
||||||
- pkg/util/sets
|
|
||||||
- pkg/util/validation
|
|
||||||
- pkg/util/validation/field
|
|
||||||
- pkg/util/wait
|
|
||||||
- pkg/util/yaml
|
|
||||||
- pkg/version
|
|
||||||
- pkg/watch
|
|
||||||
- third_party/forked/golang/reflect
|
|
||||||
- name: k8s.io/apiserver
|
|
||||||
version: a7f02eb8e3920e446965036c9610ec52a7ede92f
|
|
||||||
subpackages:
|
|
||||||
- pkg/util/flag
|
|
||||||
- pkg/util/logs
|
|
||||||
- name: k8s.io/client-go
|
|
||||||
version: ec52d278b25c8fef82a965d93afdc74771ea6963
|
|
||||||
subpackages:
|
|
||||||
- discovery
|
|
||||||
- discovery/fake
|
|
||||||
- kubernetes/scheme
|
|
||||||
- pkg/api
|
|
||||||
- pkg/api/v1
|
|
||||||
- pkg/apis/admissionregistration
|
|
||||||
- pkg/apis/admissionregistration/v1alpha1
|
|
||||||
- pkg/apis/apps
|
|
||||||
- pkg/apis/apps/v1beta1
|
|
||||||
- pkg/apis/authentication
|
|
||||||
- pkg/apis/authentication/v1
|
|
||||||
- pkg/apis/authentication/v1beta1
|
|
||||||
- pkg/apis/authorization
|
|
||||||
- pkg/apis/authorization/v1
|
|
||||||
- pkg/apis/authorization/v1beta1
|
|
||||||
- pkg/apis/autoscaling
|
|
||||||
- pkg/apis/autoscaling/v1
|
|
||||||
- pkg/apis/autoscaling/v2alpha1
|
|
||||||
- pkg/apis/batch
|
|
||||||
- pkg/apis/batch/v1
|
|
||||||
- pkg/apis/batch/v2alpha1
|
|
||||||
- pkg/apis/certificates
|
|
||||||
- pkg/apis/certificates/v1beta1
|
|
||||||
- pkg/apis/extensions
|
|
||||||
- pkg/apis/extensions/v1beta1
|
|
||||||
- pkg/apis/networking
|
|
||||||
- pkg/apis/networking/v1
|
|
||||||
- pkg/apis/policy
|
|
||||||
- pkg/apis/policy/v1beta1
|
|
||||||
- pkg/apis/rbac
|
|
||||||
- pkg/apis/rbac/v1alpha1
|
|
||||||
- pkg/apis/rbac/v1beta1
|
|
||||||
- pkg/apis/settings
|
|
||||||
- pkg/apis/settings/v1alpha1
|
|
||||||
- pkg/apis/storage
|
|
||||||
- pkg/apis/storage/v1
|
|
||||||
- pkg/apis/storage/v1beta1
|
|
||||||
- pkg/util
|
|
||||||
- pkg/util/parsers
|
|
||||||
- pkg/version
|
|
||||||
- rest
|
|
||||||
- rest/watch
|
|
||||||
- testing
|
|
||||||
- tools/auth
|
|
||||||
- tools/cache
|
|
||||||
- tools/clientcmd
|
|
||||||
- tools/clientcmd/api
|
|
||||||
- tools/clientcmd/api/latest
|
|
||||||
- tools/clientcmd/api/v1
|
|
||||||
- tools/metrics
|
|
||||||
- transport
|
|
||||||
- util/cert
|
|
||||||
- util/flowcontrol
|
|
||||||
- util/homedir
|
|
||||||
- util/integer
|
|
||||||
- name: k8s.io/gengo
|
|
||||||
version: c79c13d131b0a8f42d05faa6491c12e94ccc6f30
|
|
||||||
- name: k8s.io/kubernetes
|
|
||||||
version: 4bc5e7f9a6c25dc4c03d4d656f2cefd21540e28c
|
|
||||||
subpackages:
|
|
||||||
- pkg/api
|
|
||||||
- pkg/api/install
|
|
||||||
- pkg/api/v1
|
|
||||||
- pkg/api/v1/helper/qos
|
|
||||||
- pkg/api/v1/ref
|
|
||||||
- pkg/api/v1/resource
|
|
||||||
- pkg/apis/admissionregistration
|
|
||||||
- pkg/apis/admissionregistration/v1alpha1
|
|
||||||
- pkg/apis/apps
|
|
||||||
- pkg/apis/apps/install
|
|
||||||
- pkg/apis/apps/v1beta1
|
|
||||||
- pkg/apis/authentication
|
|
||||||
- pkg/apis/authentication/install
|
|
||||||
- pkg/apis/authentication/v1
|
|
||||||
- pkg/apis/authentication/v1beta1
|
|
||||||
- pkg/apis/authorization
|
|
||||||
- pkg/apis/authorization/install
|
|
||||||
- pkg/apis/authorization/v1
|
|
||||||
- pkg/apis/authorization/v1beta1
|
|
||||||
- pkg/apis/autoscaling
|
|
||||||
- pkg/apis/autoscaling/install
|
|
||||||
- pkg/apis/autoscaling/v1
|
|
||||||
- pkg/apis/autoscaling/v2alpha1
|
|
||||||
- pkg/apis/batch
|
|
||||||
- pkg/apis/batch/install
|
|
||||||
- pkg/apis/batch/v1
|
|
||||||
- pkg/apis/batch/v2alpha1
|
|
||||||
- pkg/apis/certificates
|
|
||||||
- pkg/apis/certificates/install
|
|
||||||
- pkg/apis/certificates/v1beta1
|
|
||||||
- pkg/apis/extensions
|
|
||||||
- pkg/apis/extensions/install
|
|
||||||
- pkg/apis/extensions/v1beta1
|
|
||||||
- pkg/apis/networking
|
|
||||||
- pkg/apis/networking/v1
|
|
||||||
- pkg/apis/policy
|
|
||||||
- pkg/apis/policy/install
|
|
||||||
- pkg/apis/policy/v1beta1
|
|
||||||
- pkg/apis/rbac
|
|
||||||
- pkg/apis/rbac/install
|
|
||||||
- pkg/apis/rbac/v1alpha1
|
|
||||||
- pkg/apis/rbac/v1beta1
|
|
||||||
- pkg/apis/settings
|
|
||||||
- pkg/apis/settings/install
|
|
||||||
- pkg/apis/settings/v1alpha1
|
|
||||||
- pkg/apis/storage
|
|
||||||
- pkg/apis/storage/install
|
|
||||||
- pkg/apis/storage/v1
|
|
||||||
- pkg/apis/storage/v1beta1
|
|
||||||
- pkg/client/clientset_generated/clientset
|
|
||||||
- pkg/client/clientset_generated/clientset/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/scheme
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/apps/v1beta1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/apps/v1beta1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/authentication/v1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/authentication/v1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/authentication/v1beta1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/authorization/v1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/authorization/v1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/authorization/v1beta1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/autoscaling/v1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/autoscaling/v1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/batch/v1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/batch/v1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/batch/v2alpha1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/certificates/v1beta1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/core/v1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/core/v1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/extensions/v1beta1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/networking/v1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/networking/v1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/policy/v1beta1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/policy/v1beta1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/rbac/v1beta1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/settings/v1alpha1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/storage/v1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/storage/v1/fake
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/storage/v1beta1
|
|
||||||
- pkg/client/clientset_generated/clientset/typed/storage/v1beta1/fake
|
|
||||||
- pkg/client/listers/core/v1
|
|
||||||
- pkg/kubelet/types
|
|
||||||
- pkg/util
|
|
||||||
- pkg/util/parsers
|
|
||||||
testImports: []
|
|
||||||
15
glide.yaml
15
glide.yaml
@@ -1,15 +0,0 @@
|
|||||||
package: github.com/kubernetes-incubator/descheduler
|
|
||||||
import:
|
|
||||||
- package: k8s.io/client-go
|
|
||||||
version: ec52d278b25c8fef82a965d93afdc74771ea6963
|
|
||||||
- package: k8s.io/apiserver
|
|
||||||
version: release-1.7
|
|
||||||
- package: k8s.io/apimachinery
|
|
||||||
version: 917740426ad66ff818da4809990480bcc0786a77
|
|
||||||
- package: k8s.io/kubernetes
|
|
||||||
version: 1.7.6
|
|
||||||
- package: github.com/kubernetes/repo-infra
|
|
||||||
- package: github.com/spf13/cobra
|
|
||||||
version: f62e98d28ab7ad31d707ba837a966378465c7b57
|
|
||||||
- package: k8s.io/gengo
|
|
||||||
version: c79c13d131b0a8f42d05faa6491c12e94ccc6f30
|
|
||||||
14
go.mod
Normal file
14
go.mod
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
module sigs.k8s.io/descheduler
|
||||||
|
|
||||||
|
go 1.13
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/spf13/cobra v0.0.5
|
||||||
|
github.com/spf13/pflag v1.0.5
|
||||||
|
k8s.io/api v0.17.0
|
||||||
|
k8s.io/apimachinery v0.17.3-beta.0
|
||||||
|
k8s.io/apiserver v0.17.0
|
||||||
|
k8s.io/client-go v0.17.0
|
||||||
|
k8s.io/component-base v0.17.0
|
||||||
|
k8s.io/klog v1.0.0
|
||||||
|
)
|
||||||
355
go.sum
Normal file
355
go.sum
Normal file
@@ -0,0 +1,355 @@
|
|||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
|
||||||
|
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||||
|
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||||
|
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
|
||||||
|
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||||
|
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
|
||||||
|
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||||
|
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
|
||||||
|
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||||
|
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||||
|
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
|
||||||
|
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||||
|
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
|
||||||
|
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||||
|
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
|
||||||
|
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||||
|
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||||
|
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||||
|
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||||
|
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||||
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
|
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||||
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
|
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||||
|
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||||
|
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
|
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||||
|
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||||
|
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||||
|
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||||
|
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
|
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
|
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||||
|
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||||
|
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||||
|
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||||
|
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||||
|
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||||
|
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||||
|
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||||
|
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
|
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
|
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||||
|
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||||
|
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||||
|
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
|
||||||
|
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||||
|
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
|
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
|
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||||
|
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
||||||
|
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||||
|
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||||
|
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
||||||
|
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||||
|
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||||
|
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
||||||
|
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
|
||||||
|
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||||
|
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||||
|
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||||
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
|
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||||
|
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
|
||||||
|
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
|
||||||
|
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||||
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||||
|
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||||
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||||
|
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
|
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
|
||||||
|
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||||
|
github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o=
|
||||||
|
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||||
|
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||||
|
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||||
|
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||||
|
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||||
|
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||||
|
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||||
|
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||||
|
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
|
||||||
|
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||||
|
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||||
|
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
|
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
|
||||||
|
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||||
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
|
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||||
|
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||||
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
|
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||||
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||||
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
|
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
|
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
|
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
|
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
|
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||||
|
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||||
|
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||||
|
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
|
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||||
|
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||||
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
|
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||||
|
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||||
|
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
|
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
|
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
|
||||||
|
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
|
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||||
|
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||||
|
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||||
|
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||||
|
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||||
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||||
|
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
|
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
|
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
|
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||||
|
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||||
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
|
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||||
|
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||||
|
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||||
|
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
|
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||||
|
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||||
|
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||||
|
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||||
|
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
|
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
|
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
|
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||||
|
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
|
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||||
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||||
|
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||||
|
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||||
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||||
|
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
|
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||||
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
|
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
|
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||||
|
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
|
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
|
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
|
||||||
|
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
|
||||||
|
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
|
||||||
|
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||||
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||||
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||||
|
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||||
|
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||||
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
|
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||||
|
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||||
|
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||||
|
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||||
|
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||||
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||||
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||||
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM=
|
||||||
|
k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
|
||||||
|
k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||||
|
k8s.io/apimachinery v0.17.3-beta.0 h1:DeN0royOQ5+j3ytFWnDkxGiF5r9T7m6E9Ukzjg4vVHc=
|
||||||
|
k8s.io/apimachinery v0.17.3-beta.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||||
|
k8s.io/apiserver v0.17.0 h1:XhUix+FKFDcBygWkQNp7wKKvZL030QUlH1o8vFeSgZA=
|
||||||
|
k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=
|
||||||
|
k8s.io/client-go v0.17.0 h1:8QOGvUGdqDMFrm9sD6IUFl256BcffynGoe80sxgTEDg=
|
||||||
|
k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
|
||||||
|
k8s.io/component-base v0.17.0 h1:BnDFcmBDq+RPpxXjmuYnZXb59XNN9CaFrX8ba9+3xrA=
|
||||||
|
k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=
|
||||||
|
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||||
|
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||||
|
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||||
|
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||||
|
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||||
|
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
|
||||||
|
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||||
|
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||||
|
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
|
||||||
|
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||||
|
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||||
95
hack/e2e-gce/gcloud_create_cluster.sh
Executable file
95
hack/e2e-gce/gcloud_create_cluster.sh
Executable file
@@ -0,0 +1,95 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -o errexit
|
||||||
|
set -o nounset
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
echo "Make sure that uuid package is installed"
|
||||||
|
|
||||||
|
master_uuid=$(uuid)
|
||||||
|
node1_uuid=$(uuid)
|
||||||
|
node2_uuid=$(uuid)
|
||||||
|
kube_apiserver_port=6443
|
||||||
|
kube_version=1.13.1
|
||||||
|
|
||||||
|
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/../../
|
||||||
|
E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
|
||||||
|
|
||||||
|
|
||||||
|
create_cluster() {
|
||||||
|
echo "#################### Creating instances ##########################"
|
||||||
|
gcloud compute instances create descheduler-$master_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||||
|
# Keeping the --zone here so as to make sure that e2e's can run locally.
|
||||||
|
echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh
|
||||||
|
|
||||||
|
gcloud compute instances create descheduler-$node1_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||||
|
echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||||
|
|
||||||
|
gcloud compute instances create descheduler-$node2_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||||
|
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||||
|
|
||||||
|
# Delete the firewall port created for master.
|
||||||
|
echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||||
|
chmod 755 $E2E_GCE_HOME/delete_cluster.sh
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
generate_kubeadm_instance_files() {
|
||||||
|
# TODO: Check if they have come up. awk $6 contains the state(RUNNING or not).
|
||||||
|
master_private_ip=$(gcloud compute instances list | grep $master_uuid|awk '{print $4}')
|
||||||
|
node1_public_ip=$(gcloud compute instances list | grep $node1_uuid|awk '{print $5}')
|
||||||
|
node2_public_ip=$(gcloud compute instances list | grep $node2_uuid|awk '{print $5}')
|
||||||
|
echo "kubeadm init --kubernetes-version=${kube_version} --apiserver-advertise-address=${master_private_ip}" --ignore-preflight-errors=all --pod-network-cidr=10.96.0.0/12 > $E2E_GCE_HOME/kubeadm_install.sh
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
transfer_install_files() {
|
||||||
|
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||||
|
gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||||
|
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||||
|
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
install_kube() {
|
||||||
|
# Docker installation.
|
||||||
|
gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||||
|
gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||||
|
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||||
|
# kubeadm installation.
|
||||||
|
# 1. Transfer files to master, nodes.
|
||||||
|
transfer_install_files
|
||||||
|
# 2. Install kubeadm.
|
||||||
|
#TODO: Add rm /tmp/kubeadm_install.sh
|
||||||
|
# Open port for kube API server
|
||||||
|
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
|
||||||
|
|
||||||
|
gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||||
|
kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b|grep 'kubeadm join')
|
||||||
|
|
||||||
|
# Copy the kubeconfig file onto /tmp for e2e tests.
|
||||||
|
gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b
|
||||||
|
gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b
|
||||||
|
|
||||||
|
# Postinstall on master, need to add a network plugin for kube-dns to come to running state.
|
||||||
|
gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b
|
||||||
|
echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh
|
||||||
|
|
||||||
|
# Copy kubeadm_join to every node.
|
||||||
|
#TODO: Put these in a loop, so that extension becomes possible.
|
||||||
|
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||||
|
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||||
|
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
|
||||||
|
|
||||||
|
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||||
|
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
|
||||||
|
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
create_cluster
|
||||||
|
|
||||||
|
generate_kubeadm_instance_files
|
||||||
|
|
||||||
|
install_kube
|
||||||
8
hack/e2e-gce/gcloud_sdk_configure.sh
Executable file
8
hack/e2e-gce/gcloud_sdk_configure.sh
Executable file
@@ -0,0 +1,8 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
|
||||||
|
gcloud auth activate-service-account --key-file "${GCE_SA_CREDS}"
|
||||||
|
gcloud config set project $GCE_PROJECT_ID
|
||||||
|
gcloud config set compute/zone $GCE_ZONE
|
||||||
9
hack/e2e-gce/install_gcloud.sh
Executable file
9
hack/e2e-gce/install_gcloud.sh
Executable file
@@ -0,0 +1,9 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
wget https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-176.0.0-linux-x86_64.tar.gz
|
||||||
|
|
||||||
|
tar -xvzf google-cloud-sdk-176.0.0-linux-x86_64.tar.gz
|
||||||
|
|
||||||
|
./google-cloud-sdk/install.sh -q
|
||||||
11
hack/e2e-gce/kubeadm_preinstall.sh
Normal file
11
hack/e2e-gce/kubeadm_preinstall.sh
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
apt-get update
|
||||||
|
apt-get install -y docker.io
|
||||||
|
|
||||||
|
apt-get update && apt-get install -y apt-transport-https
|
||||||
|
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
|
||||||
|
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
|
||||||
|
deb http://apt.kubernetes.io/ kubernetes-xenial main
|
||||||
|
EOF
|
||||||
|
apt-get update
|
||||||
|
apt-get install -y kubelet kubeadm kubectl
|
||||||
|
exit 0
|
||||||
6
hack/kind_config.yaml
Normal file
6
hack/kind_config.yaml
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
kind: Cluster
|
||||||
|
apiVersion: kind.sigs.k8s.io/v1alpha3
|
||||||
|
nodes:
|
||||||
|
- role: control-plane
|
||||||
|
- role: worker
|
||||||
|
- role: worker
|
||||||
@@ -43,5 +43,5 @@ OS_ROOT="$( os::util::absolute_path "${init_source}" )"
|
|||||||
export OS_ROOT
|
export OS_ROOT
|
||||||
cd "${OS_ROOT}"
|
cd "${OS_ROOT}"
|
||||||
|
|
||||||
PRJ_PREFIX="github.com/kubernetes-incubator/descheduler"
|
PRJ_PREFIX="sigs.k8s.io/descheduler"
|
||||||
OS_OUTPUT_BINPATH="${OS_ROOT}/_output/bin"
|
OS_OUTPUT_BINPATH="${OS_ROOT}/_output/bin"
|
||||||
|
|||||||
@@ -1,151 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# Copyright 2015 The Kubernetes Authors.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
set -o errexit
|
|
||||||
set -o nounset
|
|
||||||
set -o pipefail
|
|
||||||
|
|
||||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
|
||||||
|
|
||||||
# The sort at the end makes sure we feed the topological sort a deterministic
|
|
||||||
# list (since there aren't many dependencies).
|
|
||||||
|
|
||||||
generated_files=($(
|
|
||||||
find . -not \( \
|
|
||||||
\( \
|
|
||||||
-wholename './output' \
|
|
||||||
-o -wholename './_output' \
|
|
||||||
-o -wholename './staging' \
|
|
||||||
-o -wholename './release' \
|
|
||||||
-o -wholename './target' \
|
|
||||||
-o -wholename '*/third_party/*' \
|
|
||||||
-o -wholename '*/vendor/*' \
|
|
||||||
-o -wholename '*/codecgen-*-1234.generated.go' \
|
|
||||||
\) -prune \
|
|
||||||
\) -name '*.generated.go' | LC_ALL=C sort -r
|
|
||||||
))
|
|
||||||
|
|
||||||
# We only work for deps within this prefix.
|
|
||||||
#my_prefix="k8s.io/kubernetes"
|
|
||||||
my_prefix="github.com/kubernetes-incubator/descheduler"
|
|
||||||
|
|
||||||
# Register function to be called on EXIT to remove codecgen
|
|
||||||
# binary and also to touch the files that should be regenerated
|
|
||||||
# since they are first removed.
|
|
||||||
# This is necessary to make the script work after previous failure.
|
|
||||||
function cleanup {
|
|
||||||
rm -f "${CODECGEN:-}"
|
|
||||||
pushd "${KUBE_ROOT}" > /dev/null
|
|
||||||
for (( i=0; i < number; i++ )); do
|
|
||||||
touch "${generated_files[${i}]}" || true
|
|
||||||
done
|
|
||||||
popd > /dev/null
|
|
||||||
}
|
|
||||||
trap cleanup EXIT
|
|
||||||
|
|
||||||
# Precompute dependencies for all directories.
|
|
||||||
# Then sort all files in the dependency order.
|
|
||||||
number=${#generated_files[@]}
|
|
||||||
result=""
|
|
||||||
for (( i=0; i<number; i++ )); do
|
|
||||||
visited[${i}]=false
|
|
||||||
file="${generated_files[${i}]/\.generated\.go/.go}"
|
|
||||||
deps[${i}]=$(go list -f '{{range .Deps}}{{.}}{{"\n"}}{{end}}' ${file} | grep "^${my_prefix}")
|
|
||||||
done
|
|
||||||
###echo "DBG: found $number generated files"
|
|
||||||
###for f in $(echo "${generated_files[@]}" | LC_ALL=C sort); do
|
|
||||||
### echo "DBG: $f"
|
|
||||||
###done
|
|
||||||
|
|
||||||
# NOTE: depends function assumes that the whole repository is under
|
|
||||||
# $my_prefix - it will NOT work if that is not true.
|
|
||||||
function depends {
|
|
||||||
rhs="$(dirname ${generated_files[$2]/#./${my_prefix}})"
|
|
||||||
###echo "DBG: does ${file} depend on ${rhs}?"
|
|
||||||
for dep in ${deps[$1]}; do
|
|
||||||
###echo "DBG: checking against $dep"
|
|
||||||
if [[ "${dep}" == "${rhs}" ]]; then
|
|
||||||
###echo "DBG: = yes"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
###echo "DBG: = no"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
function tsort {
|
|
||||||
visited[$1]=true
|
|
||||||
local j=0
|
|
||||||
for (( j=0; j<number; j++ )); do
|
|
||||||
if ! ${visited[${j}]}; then
|
|
||||||
if depends "$1" ${j}; then
|
|
||||||
tsort $j
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
result="${result} $1"
|
|
||||||
}
|
|
||||||
echo "Building dependencies"
|
|
||||||
for (( i=0; i<number; i++ )); do
|
|
||||||
###echo "DBG: considering ${generated_files[${i}]}"
|
|
||||||
if ! ${visited[${i}]}; then
|
|
||||||
###echo "DBG: tsorting ${generated_files[${i}]}"
|
|
||||||
tsort ${i}
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
index=(${result})
|
|
||||||
|
|
||||||
haveindex=${index:-}
|
|
||||||
if [[ -z ${haveindex} ]]; then
|
|
||||||
echo No files found for $0
|
|
||||||
echo A previous run of $0 may have deleted all the files and then crashed.
|
|
||||||
echo Use 'touch' to create files named 'types.generated.go' listed as deleted in 'git status'
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Building codecgen"
|
|
||||||
CODECGEN="${PWD}/codecgen_binary"
|
|
||||||
go build -o "${CODECGEN}" ./vendor/github.com/ugorji/go/codec/codecgen
|
|
||||||
|
|
||||||
# Running codecgen fails if some of the files doesn't compile.
|
|
||||||
# Thus (since all the files are completely auto-generated and
|
|
||||||
# not required for the code to be compilable, we first remove
|
|
||||||
# them and the regenerate them.
|
|
||||||
for (( i=0; i < number; i++ )); do
|
|
||||||
rm -f "${generated_files[${i}]}"
|
|
||||||
done
|
|
||||||
|
|
||||||
# Generate files in the dependency order.
|
|
||||||
for current in "${index[@]}"; do
|
|
||||||
generated_file=${generated_files[${current}]}
|
|
||||||
initial_dir=${PWD}
|
|
||||||
file=${generated_file/\.generated\.go/.go}
|
|
||||||
echo "processing ${file}"
|
|
||||||
# codecgen work only if invoked from directory where the file
|
|
||||||
# is located.
|
|
||||||
pushd "$(dirname ${file})" > /dev/null
|
|
||||||
base_file=$(basename "${file}")
|
|
||||||
base_generated_file=$(basename "${generated_file}")
|
|
||||||
# We use '-d 1234' flag to have a deterministic output every time.
|
|
||||||
# The constant was just randomly chosen.
|
|
||||||
###echo "DBG: running ${CODECGEN} -d 1234 -o ${base_generated_file} ${base_file}"
|
|
||||||
${CODECGEN} -d 1234 -o "${base_generated_file}" "${base_file}"
|
|
||||||
# Add boilerplate at the beginning of the generated file.
|
|
||||||
sed 's/YEAR/2017/' "${initial_dir}/hack/boilerplate/boilerplate.go.txt" > "${base_generated_file}.tmp"
|
|
||||||
cat "${base_generated_file}" >> "${base_generated_file}.tmp"
|
|
||||||
mv "${base_generated_file}.tmp" "${base_generated_file}"
|
|
||||||
popd > /dev/null
|
|
||||||
done
|
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||||
|
|
||||||
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "${PRJ_PREFIX}/vendor/k8s.io/kubernetes/cmd/libs/go2idl/conversion-gen"
|
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
|
||||||
|
|
||||||
${OS_OUTPUT_BINPATH}/conversion-gen \
|
${OS_OUTPUT_BINPATH}/conversion-gen \
|
||||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||||
|
|
||||||
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "${PRJ_PREFIX}/vendor/k8s.io/kubernetes/cmd/libs/go2idl/deepcopy-gen"
|
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
|
||||||
|
|
||||||
${OS_OUTPUT_BINPATH}/deepcopy-gen \
|
${OS_OUTPUT_BINPATH}/deepcopy-gen \
|
||||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||||
|
|
||||||
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "${PRJ_PREFIX}/vendor/k8s.io/kubernetes/cmd/libs/go2idl/defaulter-gen"
|
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
|
||||||
|
|
||||||
${OS_OUTPUT_BINPATH}/defaulter-gen \
|
${OS_OUTPUT_BINPATH}/defaulter-gen \
|
||||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||||
|
|||||||
49
hack/update-gofmt.sh
Executable file
49
hack/update-gofmt.sh
Executable file
@@ -0,0 +1,49 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Copyright 2017 The Kubernetes Authors.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
set -o errexit
|
||||||
|
set -o nounset
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||||
|
|
||||||
|
GO_VERSION=($(go version))
|
||||||
|
|
||||||
|
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
|
||||||
|
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
cd "${DESCHEDULER_ROOT}"
|
||||||
|
|
||||||
|
find_files() {
|
||||||
|
find . -not \( \
|
||||||
|
\( \
|
||||||
|
-wholename './output' \
|
||||||
|
-o -wholename './_output' \
|
||||||
|
-o -wholename './release' \
|
||||||
|
-o -wholename './target' \
|
||||||
|
-o -wholename './.git' \
|
||||||
|
-o -wholename '*/third_party/*' \
|
||||||
|
-o -wholename '*/Godeps/*' \
|
||||||
|
-o -wholename '*/vendor/*' \
|
||||||
|
\) -prune \
|
||||||
|
\) -name '*.go'
|
||||||
|
}
|
||||||
|
|
||||||
|
GOFMT="gofmt -s -w"
|
||||||
|
find_files | xargs $GOFMT -l
|
||||||
@@ -23,8 +23,8 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
|||||||
|
|
||||||
GO_VERSION=($(go version))
|
GO_VERSION=($(go version))
|
||||||
|
|
||||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8') ]]; then
|
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
|
||||||
echo "Unknown go version '${GO_VERSION}', skipping gofmt."
|
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -45,7 +45,7 @@ find_files() {
|
|||||||
\) -name '*.go'
|
\) -name '*.go'
|
||||||
}
|
}
|
||||||
|
|
||||||
GOFMT="gofmt -s"
|
GOFMT="gofmt -s"
|
||||||
bad_files=$(find_files | xargs $GOFMT -l)
|
bad_files=$(find_files | xargs $GOFMT -l)
|
||||||
if [[ -n "${bad_files}" ]]; then
|
if [[ -n "${bad_files}" ]]; then
|
||||||
echo "!!! '$GOFMT' needs to be run on the following files: "
|
echo "!!! '$GOFMT' needs to be run on the following files: "
|
||||||
|
|||||||
28
kubernetes/configmap.yaml
Normal file
28
kubernetes/configmap.yaml
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: descheduler-policy-configmap
|
||||||
|
namespace: kube-system
|
||||||
|
data:
|
||||||
|
policy.yaml: |
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"RemoveDuplicates":
|
||||||
|
enabled: true
|
||||||
|
"RemovePodsViolatingInterPodAntiAffinity":
|
||||||
|
enabled: true
|
||||||
|
"LowNodeUtilization":
|
||||||
|
enabled: true
|
||||||
|
params:
|
||||||
|
nodeResourceUtilizationThresholds:
|
||||||
|
thresholds:
|
||||||
|
"cpu" : 20
|
||||||
|
"memory": 20
|
||||||
|
"pods": 20
|
||||||
|
targetThresholds:
|
||||||
|
"cpu" : 50
|
||||||
|
"memory": 50
|
||||||
|
"pods": 50
|
||||||
|
|
||||||
35
kubernetes/cronjob.yaml
Normal file
35
kubernetes/cronjob.yaml
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
---
|
||||||
|
apiVersion: batch/v1beta1
|
||||||
|
kind: CronJob
|
||||||
|
metadata:
|
||||||
|
name: descheduler-cronjob
|
||||||
|
namespace: kube-system
|
||||||
|
spec:
|
||||||
|
schedule: "*/2 * * * *"
|
||||||
|
concurrencyPolicy: "Forbid"
|
||||||
|
jobTemplate:
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
name: descheduler-pod
|
||||||
|
spec:
|
||||||
|
priorityClassName: system-cluster-critical
|
||||||
|
containers:
|
||||||
|
- name: descheduler
|
||||||
|
image: us.gcr.io/k8s-artifacts-prod/descheduler:v0.10.0
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /policy-dir
|
||||||
|
name: policy-volume
|
||||||
|
command:
|
||||||
|
- "/bin/descheduler"
|
||||||
|
args:
|
||||||
|
- "--policy-config-file"
|
||||||
|
- "/policy-dir/policy.yaml"
|
||||||
|
- "--v"
|
||||||
|
- "3"
|
||||||
|
restartPolicy: "Never"
|
||||||
|
serviceAccountName: descheduler-sa
|
||||||
|
volumes:
|
||||||
|
- name: policy-volume
|
||||||
|
configMap:
|
||||||
|
name: descheduler-policy-configmap
|
||||||
33
kubernetes/job.yaml
Normal file
33
kubernetes/job.yaml
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
---
|
||||||
|
apiVersion: batch/v1
|
||||||
|
kind: Job
|
||||||
|
metadata:
|
||||||
|
name: descheduler-job
|
||||||
|
namespace: kube-system
|
||||||
|
spec:
|
||||||
|
parallelism: 1
|
||||||
|
completions: 1
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
name: descheduler-pod
|
||||||
|
spec:
|
||||||
|
priorityClassName: system-cluster-critical
|
||||||
|
containers:
|
||||||
|
- name: descheduler
|
||||||
|
image: us.gcr.io/k8s-artifacts-prod/descheduler:v0.10.0
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /policy-dir
|
||||||
|
name: policy-volume
|
||||||
|
command:
|
||||||
|
- "/bin/descheduler"
|
||||||
|
args:
|
||||||
|
- "--policy-config-file"
|
||||||
|
- "/policy-dir/policy.yaml"
|
||||||
|
- "--v"
|
||||||
|
- "3"
|
||||||
|
restartPolicy: "Never"
|
||||||
|
serviceAccountName: descheduler-sa
|
||||||
|
volumes:
|
||||||
|
- name: policy-volume
|
||||||
|
configMap:
|
||||||
|
name: descheduler-policy-configmap
|
||||||
40
kubernetes/rbac.yaml
Normal file
40
kubernetes/rbac.yaml
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
---
|
||||||
|
kind: ClusterRole
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
metadata:
|
||||||
|
name: descheduler-cluster-role
|
||||||
|
namespace: kube-system
|
||||||
|
rules:
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["events"]
|
||||||
|
verbs: ["create", "update"]
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["nodes"]
|
||||||
|
verbs: ["get", "watch", "list"]
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["pods"]
|
||||||
|
verbs: ["get", "watch", "list", "delete"]
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["pods/eviction"]
|
||||||
|
verbs: ["create"]
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ServiceAccount
|
||||||
|
metadata:
|
||||||
|
name: descheduler-sa
|
||||||
|
namespace: kube-system
|
||||||
|
---
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRoleBinding
|
||||||
|
metadata:
|
||||||
|
name: descheduler-cluster-role-binding
|
||||||
|
namespace: kube-system
|
||||||
|
roleRef:
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
kind: ClusterRole
|
||||||
|
name: descheduler-cluster-role
|
||||||
|
subjects:
|
||||||
|
- name: descheduler-sa
|
||||||
|
kind: ServiceAccount
|
||||||
|
namespace: kube-system
|
||||||
|
|
||||||
@@ -16,4 +16,4 @@ limitations under the License.
|
|||||||
|
|
||||||
// +k8s:deepcopy-gen=package,register
|
// +k8s:deepcopy-gen=package,register
|
||||||
|
|
||||||
package api // import "github.com/kubernetes-incubator/descheduler/pkg/api"
|
package api // import "sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
|||||||
@@ -1,49 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2017 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Package install installs the descheduler's policy API group.
|
|
||||||
package install
|
|
||||||
|
|
||||||
import (
|
|
||||||
"k8s.io/apimachinery/pkg/apimachinery/announced"
|
|
||||||
"k8s.io/apimachinery/pkg/apimachinery/registered"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
|
||||||
|
|
||||||
deschedulerapi "github.com/kubernetes-incubator/descheduler/pkg/api"
|
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
|
|
||||||
deschedulerscheme "github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
Install(deschedulerscheme.GroupFactoryRegistry, deschedulerscheme.Registry, deschedulerscheme.Scheme)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Install registers the API group and adds types to a scheme
|
|
||||||
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
|
|
||||||
if err := announced.NewGroupMetaFactory(
|
|
||||||
&announced.GroupMetaFactoryArgs{
|
|
||||||
GroupName: deschedulerapi.GroupName,
|
|
||||||
VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version},
|
|
||||||
ImportPrefix: "github.com/kubernetes-incubator/descheduler/pkg/api",
|
|
||||||
AddInternalObjectsToScheme: deschedulerapi.AddToScheme,
|
|
||||||
},
|
|
||||||
announced.VersionToSchemeFunc{
|
|
||||||
v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme,
|
|
||||||
},
|
|
||||||
).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -19,11 +19,14 @@ package api
|
|||||||
import (
|
import (
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||||
AddToScheme = SchemeBuilder.AddToScheme
|
AddToScheme = SchemeBuilder.AddToScheme
|
||||||
|
Scheme = runtime.NewScheme()
|
||||||
)
|
)
|
||||||
|
|
||||||
// GroupName is the group name use in this package
|
// GroupName is the group name use in this package
|
||||||
@@ -32,6 +35,12 @@ const GroupName = "descheduler"
|
|||||||
// SchemeGroupVersion is group version used to register these objects
|
// SchemeGroupVersion is group version used to register these objects
|
||||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if err := addKnownTypes(scheme.Scheme); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Kind takes an unqualified kind and returns a Group qualified GroupKind
|
// Kind takes an unqualified kind and returns a Group qualified GroupKind
|
||||||
func Kind(kind string) schema.GroupKind {
|
func Kind(kind string) schema.GroupKind {
|
||||||
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -17,10 +17,12 @@ limitations under the License.
|
|||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
type DeschedulerPolicy struct {
|
type DeschedulerPolicy struct {
|
||||||
metav1.TypeMeta
|
metav1.TypeMeta
|
||||||
|
|
||||||
@@ -45,6 +47,7 @@ type DeschedulerStrategy struct {
|
|||||||
// Only one of its members may be specified
|
// Only one of its members may be specified
|
||||||
type StrategyParameters struct {
|
type StrategyParameters struct {
|
||||||
NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds
|
NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds
|
||||||
|
NodeAffinityType []string
|
||||||
}
|
}
|
||||||
|
|
||||||
type Percentage float64
|
type Percentage float64
|
||||||
|
|||||||
@@ -15,10 +15,10 @@ limitations under the License.
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
// +k8s:deepcopy-gen=package,register
|
// +k8s:deepcopy-gen=package,register
|
||||||
// +k8s:conversion-gen=github.com/kubernetes-incubator/descheduler/pkg/api
|
// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/api
|
||||||
// +k8s:defaulter-gen=TypeMeta
|
// +k8s:defaulter-gen=TypeMeta
|
||||||
|
|
||||||
// Package v1alpha1 is the v1alpha1 version of the descheduler API
|
// Package v1alpha1 is the v1alpha1 version of the descheduler API
|
||||||
// +groupName=descheduler
|
// +groupName=descheduler
|
||||||
|
|
||||||
package v1alpha1 // import "github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
|
package v1alpha1 // import "sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -17,10 +17,12 @@ limitations under the License.
|
|||||||
package v1alpha1
|
package v1alpha1
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
type DeschedulerPolicy struct {
|
type DeschedulerPolicy struct {
|
||||||
metav1.TypeMeta `json:",inline"`
|
metav1.TypeMeta `json:",inline"`
|
||||||
|
|
||||||
@@ -45,6 +47,7 @@ type DeschedulerStrategy struct {
|
|||||||
// Only one of its members may be specified
|
// Only one of its members may be specified
|
||||||
type StrategyParameters struct {
|
type StrategyParameters struct {
|
||||||
NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
|
NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
|
||||||
|
NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Percentage float64
|
type Percentage float64
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copyright 2017 The Kubernetes Authors.
|
Copyright 2020 The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
@@ -16,34 +16,66 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// This file was autogenerated by conversion-gen. Do not edit it manually!
|
// Code generated by conversion-gen. DO NOT EDIT.
|
||||||
|
|
||||||
package v1alpha1
|
package v1alpha1
|
||||||
|
|
||||||
import (
|
import (
|
||||||
api "github.com/kubernetes-incubator/descheduler/pkg/api"
|
unsafe "unsafe"
|
||||||
|
|
||||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
unsafe "unsafe"
|
api "sigs.k8s.io/descheduler/pkg/api"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
SchemeBuilder.Register(RegisterConversions)
|
localSchemeBuilder.Register(RegisterConversions)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegisterConversions adds conversion functions to the given scheme.
|
// RegisterConversions adds conversion functions to the given scheme.
|
||||||
// Public to allow building arbitrary schemes.
|
// Public to allow building arbitrary schemes.
|
||||||
func RegisterConversions(scheme *runtime.Scheme) error {
|
func RegisterConversions(s *runtime.Scheme) error {
|
||||||
return scheme.AddGeneratedConversionFuncs(
|
if err := s.AddGeneratedConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy,
|
return Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
|
||||||
Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy,
|
}); err != nil {
|
||||||
Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy,
|
return err
|
||||||
Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy,
|
}
|
||||||
Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds,
|
if err := s.AddGeneratedConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds,
|
return Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
|
||||||
Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters,
|
}); err != nil {
|
||||||
Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters,
|
return err
|
||||||
)
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*DeschedulerStrategy)(nil), (*api.DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(a.(*DeschedulerStrategy), b.(*api.DeschedulerStrategy), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*api.DeschedulerStrategy)(nil), (*DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(a.(*api.DeschedulerStrategy), b.(*DeschedulerStrategy), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*NodeResourceUtilizationThresholds)(nil), (*api.NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(a.(*NodeResourceUtilizationThresholds), b.(*api.NodeResourceUtilizationThresholds), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*api.NodeResourceUtilizationThresholds)(nil), (*NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(a.(*api.NodeResourceUtilizationThresholds), b.(*NodeResourceUtilizationThresholds), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*StrategyParameters)(nil), (*api.StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(a.(*StrategyParameters), b.(*api.StrategyParameters), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*api.StrategyParameters)(nil), (*StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(a.(*api.StrategyParameters), b.(*StrategyParameters), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
|
func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
|
||||||
@@ -122,6 +154,7 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
|
|||||||
if err := Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
|
if err := Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -134,6 +167,7 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
|
|||||||
if err := Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
|
if err := Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copyright 2017 The Kubernetes Authors.
|
Copyright 2020 The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
@@ -16,98 +16,155 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||||
|
|
||||||
package v1alpha1
|
package v1alpha1
|
||||||
|
|
||||||
import (
|
import (
|
||||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
|
||||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
reflect "reflect"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
SchemeBuilder.Register(RegisterDeepCopies)
|
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||||
}
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
|
if in.Strategies != nil {
|
||||||
// to allow building arbitrary schemes.
|
in, out := &in.Strategies, &out.Strategies
|
||||||
func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
*out = make(StrategyList, len(*in))
|
||||||
return scheme.AddGeneratedDeepCopyFuncs(
|
for key, val := range *in {
|
||||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_DeschedulerPolicy, InType: reflect.TypeOf(&DeschedulerPolicy{})},
|
(*out)[key] = *val.DeepCopy()
|
||||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_DeschedulerStrategy, InType: reflect.TypeOf(&DeschedulerStrategy{})},
|
|
||||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_NodeResourceUtilizationThresholds, InType: reflect.TypeOf(&NodeResourceUtilizationThresholds{})},
|
|
||||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_StrategyParameters, InType: reflect.TypeOf(&StrategyParameters{})},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy_v1alpha1_DeschedulerPolicy is an autogenerated deepcopy function.
|
|
||||||
func DeepCopy_v1alpha1_DeschedulerPolicy(in interface{}, out interface{}, c *conversion.Cloner) error {
|
|
||||||
{
|
|
||||||
in := in.(*DeschedulerPolicy)
|
|
||||||
out := out.(*DeschedulerPolicy)
|
|
||||||
*out = *in
|
|
||||||
if in.Strategies != nil {
|
|
||||||
in, out := &in.Strategies, &out.Strategies
|
|
||||||
*out = make(StrategyList)
|
|
||||||
for key, val := range *in {
|
|
||||||
newVal := new(DeschedulerStrategy)
|
|
||||||
if err := DeepCopy_v1alpha1_DeschedulerStrategy(&val, newVal, c); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
(*out)[key] = *newVal
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
|
||||||
|
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
|
||||||
|
if in == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
out := new(DeschedulerPolicy)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
|
||||||
|
*out = *in
|
||||||
|
in.Params.DeepCopyInto(&out.Params)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
|
||||||
|
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DeschedulerStrategy)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
|
||||||
|
*out = *in
|
||||||
|
if in.Thresholds != nil {
|
||||||
|
in, out := &in.Thresholds, &out.Thresholds
|
||||||
|
*out = make(ResourceThresholds, len(*in))
|
||||||
|
for key, val := range *in {
|
||||||
|
(*out)[key] = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.TargetThresholds != nil {
|
||||||
|
in, out := &in.TargetThresholds, &out.TargetThresholds
|
||||||
|
*out = make(ResourceThresholds, len(*in))
|
||||||
|
for key, val := range *in {
|
||||||
|
(*out)[key] = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
|
||||||
|
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(NodeResourceUtilizationThresholds)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
|
||||||
|
{
|
||||||
|
in := &in
|
||||||
|
*out = make(ResourceThresholds, len(*in))
|
||||||
|
for key, val := range *in {
|
||||||
|
(*out)[key] = val
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy_v1alpha1_DeschedulerStrategy is an autogenerated deepcopy function.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
|
||||||
func DeepCopy_v1alpha1_DeschedulerStrategy(in interface{}, out interface{}, c *conversion.Cloner) error {
|
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
|
||||||
{
|
if in == nil {
|
||||||
in := in.(*DeschedulerStrategy)
|
|
||||||
out := out.(*DeschedulerStrategy)
|
|
||||||
*out = *in
|
|
||||||
if err := DeepCopy_v1alpha1_StrategyParameters(&in.Params, &out.Params, c); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
out := new(ResourceThresholds)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return *out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in StrategyList) DeepCopyInto(out *StrategyList) {
|
||||||
|
{
|
||||||
|
in := &in
|
||||||
|
*out = make(StrategyList, len(*in))
|
||||||
|
for key, val := range *in {
|
||||||
|
(*out)[key] = *val.DeepCopy()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy_v1alpha1_NodeResourceUtilizationThresholds is an autogenerated deepcopy function.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
|
||||||
func DeepCopy_v1alpha1_NodeResourceUtilizationThresholds(in interface{}, out interface{}, c *conversion.Cloner) error {
|
func (in StrategyList) DeepCopy() StrategyList {
|
||||||
{
|
if in == nil {
|
||||||
in := in.(*NodeResourceUtilizationThresholds)
|
|
||||||
out := out.(*NodeResourceUtilizationThresholds)
|
|
||||||
*out = *in
|
|
||||||
if in.Thresholds != nil {
|
|
||||||
in, out := &in.Thresholds, &out.Thresholds
|
|
||||||
*out = make(ResourceThresholds)
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.TargetThresholds != nil {
|
|
||||||
in, out := &in.TargetThresholds, &out.TargetThresholds
|
|
||||||
*out = make(ResourceThresholds)
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
out := new(StrategyList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return *out
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy_v1alpha1_StrategyParameters is an autogenerated deepcopy function.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func DeepCopy_v1alpha1_StrategyParameters(in interface{}, out interface{}, c *conversion.Cloner) error {
|
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||||
{
|
*out = *in
|
||||||
in := in.(*StrategyParameters)
|
in.NodeResourceUtilizationThresholds.DeepCopyInto(&out.NodeResourceUtilizationThresholds)
|
||||||
out := out.(*StrategyParameters)
|
if in.NodeAffinityType != nil {
|
||||||
*out = *in
|
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||||
if err := DeepCopy_v1alpha1_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, c); err != nil {
|
*out = make([]string, len(*in))
|
||||||
return err
|
copy(*out, *in)
|
||||||
}
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
|
||||||
|
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
|
||||||
|
if in == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
out := new(StrategyParameters)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copyright 2017 The Kubernetes Authors.
|
Copyright 2020 The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// This file was autogenerated by defaulter-gen. Do not edit it manually!
|
// Code generated by defaulter-gen. DO NOT EDIT.
|
||||||
|
|
||||||
package v1alpha1
|
package v1alpha1
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copyright 2017 The Kubernetes Authors.
|
Copyright 2020 The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
@@ -16,98 +16,155 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||||
|
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
|
||||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
reflect "reflect"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
SchemeBuilder.Register(RegisterDeepCopies)
|
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||||
}
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
|
if in.Strategies != nil {
|
||||||
// to allow building arbitrary schemes.
|
in, out := &in.Strategies, &out.Strategies
|
||||||
func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
*out = make(StrategyList, len(*in))
|
||||||
return scheme.AddGeneratedDeepCopyFuncs(
|
for key, val := range *in {
|
||||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeschedulerPolicy, InType: reflect.TypeOf(&DeschedulerPolicy{})},
|
(*out)[key] = *val.DeepCopy()
|
||||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeschedulerStrategy, InType: reflect.TypeOf(&DeschedulerStrategy{})},
|
|
||||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeResourceUtilizationThresholds, InType: reflect.TypeOf(&NodeResourceUtilizationThresholds{})},
|
|
||||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_StrategyParameters, InType: reflect.TypeOf(&StrategyParameters{})},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy_api_DeschedulerPolicy is an autogenerated deepcopy function.
|
|
||||||
func DeepCopy_api_DeschedulerPolicy(in interface{}, out interface{}, c *conversion.Cloner) error {
|
|
||||||
{
|
|
||||||
in := in.(*DeschedulerPolicy)
|
|
||||||
out := out.(*DeschedulerPolicy)
|
|
||||||
*out = *in
|
|
||||||
if in.Strategies != nil {
|
|
||||||
in, out := &in.Strategies, &out.Strategies
|
|
||||||
*out = make(StrategyList)
|
|
||||||
for key, val := range *in {
|
|
||||||
newVal := new(DeschedulerStrategy)
|
|
||||||
if err := DeepCopy_api_DeschedulerStrategy(&val, newVal, c); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
(*out)[key] = *newVal
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
|
||||||
|
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
|
||||||
|
if in == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
out := new(DeschedulerPolicy)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
|
||||||
|
*out = *in
|
||||||
|
in.Params.DeepCopyInto(&out.Params)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
|
||||||
|
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DeschedulerStrategy)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
|
||||||
|
*out = *in
|
||||||
|
if in.Thresholds != nil {
|
||||||
|
in, out := &in.Thresholds, &out.Thresholds
|
||||||
|
*out = make(ResourceThresholds, len(*in))
|
||||||
|
for key, val := range *in {
|
||||||
|
(*out)[key] = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.TargetThresholds != nil {
|
||||||
|
in, out := &in.TargetThresholds, &out.TargetThresholds
|
||||||
|
*out = make(ResourceThresholds, len(*in))
|
||||||
|
for key, val := range *in {
|
||||||
|
(*out)[key] = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
|
||||||
|
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(NodeResourceUtilizationThresholds)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
|
||||||
|
{
|
||||||
|
in := &in
|
||||||
|
*out = make(ResourceThresholds, len(*in))
|
||||||
|
for key, val := range *in {
|
||||||
|
(*out)[key] = val
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy_api_DeschedulerStrategy is an autogenerated deepcopy function.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
|
||||||
func DeepCopy_api_DeschedulerStrategy(in interface{}, out interface{}, c *conversion.Cloner) error {
|
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
|
||||||
{
|
if in == nil {
|
||||||
in := in.(*DeschedulerStrategy)
|
|
||||||
out := out.(*DeschedulerStrategy)
|
|
||||||
*out = *in
|
|
||||||
if err := DeepCopy_api_StrategyParameters(&in.Params, &out.Params, c); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
out := new(ResourceThresholds)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return *out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in StrategyList) DeepCopyInto(out *StrategyList) {
|
||||||
|
{
|
||||||
|
in := &in
|
||||||
|
*out = make(StrategyList, len(*in))
|
||||||
|
for key, val := range *in {
|
||||||
|
(*out)[key] = *val.DeepCopy()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy_api_NodeResourceUtilizationThresholds is an autogenerated deepcopy function.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
|
||||||
func DeepCopy_api_NodeResourceUtilizationThresholds(in interface{}, out interface{}, c *conversion.Cloner) error {
|
func (in StrategyList) DeepCopy() StrategyList {
|
||||||
{
|
if in == nil {
|
||||||
in := in.(*NodeResourceUtilizationThresholds)
|
|
||||||
out := out.(*NodeResourceUtilizationThresholds)
|
|
||||||
*out = *in
|
|
||||||
if in.Thresholds != nil {
|
|
||||||
in, out := &in.Thresholds, &out.Thresholds
|
|
||||||
*out = make(ResourceThresholds)
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.TargetThresholds != nil {
|
|
||||||
in, out := &in.TargetThresholds, &out.TargetThresholds
|
|
||||||
*out = make(ResourceThresholds)
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
out := new(StrategyList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return *out
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy_api_StrategyParameters is an autogenerated deepcopy function.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func DeepCopy_api_StrategyParameters(in interface{}, out interface{}, c *conversion.Cloner) error {
|
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||||
{
|
*out = *in
|
||||||
in := in.(*StrategyParameters)
|
in.NodeResourceUtilizationThresholds.DeepCopyInto(&out.NodeResourceUtilizationThresholds)
|
||||||
out := out.(*StrategyParameters)
|
if in.NodeAffinityType != nil {
|
||||||
*out = *in
|
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||||
if err := DeepCopy_api_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, c); err != nil {
|
*out = make([]string, len(*in))
|
||||||
return err
|
copy(*out, *in)
|
||||||
}
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
|
||||||
|
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
|
||||||
|
if in == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
out := new(StrategyParameters)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -16,4 +16,4 @@ limitations under the License.
|
|||||||
|
|
||||||
// +k8s:deepcopy-gen=package,register
|
// +k8s:deepcopy-gen=package,register
|
||||||
|
|
||||||
package componentconfig // import "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
|
package componentconfig // import "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||||
|
|||||||
@@ -1,49 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2017 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Package install installs the descheduler's componentconfig API group.
|
|
||||||
package install
|
|
||||||
|
|
||||||
import (
|
|
||||||
"k8s.io/apimachinery/pkg/apimachinery/announced"
|
|
||||||
"k8s.io/apimachinery/pkg/apimachinery/registered"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
|
||||||
|
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
|
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
|
|
||||||
deschedulerscheme "github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
Install(deschedulerscheme.GroupFactoryRegistry, deschedulerscheme.Registry, deschedulerscheme.Scheme)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Install registers the API group and adds types to a scheme
|
|
||||||
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
|
|
||||||
if err := announced.NewGroupMetaFactory(
|
|
||||||
&announced.GroupMetaFactoryArgs{
|
|
||||||
GroupName: componentconfig.GroupName,
|
|
||||||
VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version},
|
|
||||||
ImportPrefix: "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig",
|
|
||||||
AddInternalObjectsToScheme: componentconfig.AddToScheme,
|
|
||||||
},
|
|
||||||
announced.VersionToSchemeFunc{
|
|
||||||
v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme,
|
|
||||||
},
|
|
||||||
).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -19,6 +19,8 @@ package componentconfig
|
|||||||
import (
|
import (
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -32,6 +34,12 @@ const GroupName = "deschedulercomponentconfig"
|
|||||||
// SchemeGroupVersion is group version used to register these objects
|
// SchemeGroupVersion is group version used to register these objects
|
||||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if err := addKnownTypes(scheme.Scheme); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Kind takes an unqualified kind and returns a Group qualified GroupKind
|
// Kind takes an unqualified kind and returns a Group qualified GroupKind
|
||||||
func Kind(kind string) schema.GroupKind {
|
func Kind(kind string) schema.GroupKind {
|
||||||
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
||||||
|
|||||||
@@ -1,525 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2017 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// ************************************************************
|
|
||||||
// DO NOT EDIT.
|
|
||||||
// THIS FILE IS AUTO-GENERATED BY codecgen.
|
|
||||||
// ************************************************************
|
|
||||||
|
|
||||||
package componentconfig
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
codec1978 "github.com/ugorji/go/codec"
|
|
||||||
pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
time "time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ----- content types ----
|
|
||||||
codecSelferC_UTF81234 = 1
|
|
||||||
codecSelferC_RAW1234 = 0
|
|
||||||
// ----- value types used ----
|
|
||||||
codecSelferValueTypeArray1234 = 10
|
|
||||||
codecSelferValueTypeMap1234 = 9
|
|
||||||
// ----- containerStateValues ----
|
|
||||||
codecSelfer_containerMapKey1234 = 2
|
|
||||||
codecSelfer_containerMapValue1234 = 3
|
|
||||||
codecSelfer_containerMapEnd1234 = 4
|
|
||||||
codecSelfer_containerArrayElem1234 = 6
|
|
||||||
codecSelfer_containerArrayEnd1234 = 7
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
|
|
||||||
codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
|
|
||||||
)
|
|
||||||
|
|
||||||
type codecSelfer1234 struct{}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
if codec1978.GenVersion != 5 {
|
|
||||||
_, file, _, _ := runtime.Caller(0)
|
|
||||||
err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
|
|
||||||
5, codec1978.GenVersion, file)
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
if false { // reference the types, but skip this branch at build/run time
|
|
||||||
var v0 pkg1_v1.TypeMeta
|
|
||||||
var v1 time.Duration
|
|
||||||
_, _ = v0, v1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
|
||||||
var h codecSelfer1234
|
|
||||||
z, r := codec1978.GenHelperEncoder(e)
|
|
||||||
_, _, _ = h, z, r
|
|
||||||
if x == nil {
|
|
||||||
r.EncodeNil()
|
|
||||||
} else {
|
|
||||||
yym1 := z.EncBinary()
|
|
||||||
_ = yym1
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.EncExt(x) {
|
|
||||||
} else {
|
|
||||||
yysep2 := !z.EncBinary()
|
|
||||||
yy2arr2 := z.EncBasicHandle().StructToArray
|
|
||||||
var yyq2 [6]bool
|
|
||||||
_, _, _ = yysep2, yyq2, yy2arr2
|
|
||||||
const yyr2 bool = false
|
|
||||||
yyq2[0] = x.Kind != ""
|
|
||||||
yyq2[1] = x.APIVersion != ""
|
|
||||||
var yynn2 int
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
r.EncodeArrayStart(6)
|
|
||||||
} else {
|
|
||||||
yynn2 = 4
|
|
||||||
for _, b := range yyq2 {
|
|
||||||
if b {
|
|
||||||
yynn2++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
r.EncodeMapStart(yynn2)
|
|
||||||
yynn2 = 0
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if yyq2[0] {
|
|
||||||
yym4 := z.EncBinary()
|
|
||||||
_ = yym4
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, "")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if yyq2[0] {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("kind"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym5 := z.EncBinary()
|
|
||||||
_ = yym5
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if yyq2[1] {
|
|
||||||
yym7 := z.EncBinary()
|
|
||||||
_ = yym7
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, "")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if yyq2[1] {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym8 := z.EncBinary()
|
|
||||||
_ = yym8
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
yym10 := z.EncBinary()
|
|
||||||
_ = yym10
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.EncExt(x.DeschedulingInterval) {
|
|
||||||
} else {
|
|
||||||
r.EncodeInt(int64(x.DeschedulingInterval))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("DeschedulingInterval"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym11 := z.EncBinary()
|
|
||||||
_ = yym11
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.EncExt(x.DeschedulingInterval) {
|
|
||||||
} else {
|
|
||||||
r.EncodeInt(int64(x.DeschedulingInterval))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
yym13 := z.EncBinary()
|
|
||||||
_ = yym13
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigFile))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("KubeconfigFile"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym14 := z.EncBinary()
|
|
||||||
_ = yym14
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigFile))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
yym16 := z.EncBinary()
|
|
||||||
_ = yym16
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("PolicyConfigFile"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym17 := z.EncBinary()
|
|
||||||
_ = yym17
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
yym19 := z.EncBinary()
|
|
||||||
_ = yym19
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeBool(bool(x.DryRun))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("DryRun"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym20 := z.EncBinary()
|
|
||||||
_ = yym20
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeBool(bool(x.DryRun))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
} else {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DeschedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
|
|
||||||
var h codecSelfer1234
|
|
||||||
z, r := codec1978.GenHelperDecoder(d)
|
|
||||||
_, _, _ = h, z, r
|
|
||||||
yym1 := z.DecBinary()
|
|
||||||
_ = yym1
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.DecExt(x) {
|
|
||||||
} else {
|
|
||||||
yyct2 := r.ContainerType()
|
|
||||||
if yyct2 == codecSelferValueTypeMap1234 {
|
|
||||||
yyl2 := r.ReadMapStart()
|
|
||||||
if yyl2 == 0 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
|
|
||||||
} else {
|
|
||||||
x.codecDecodeSelfFromMap(yyl2, d)
|
|
||||||
}
|
|
||||||
} else if yyct2 == codecSelferValueTypeArray1234 {
|
|
||||||
yyl2 := r.ReadArrayStart()
|
|
||||||
if yyl2 == 0 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
} else {
|
|
||||||
x.codecDecodeSelfFromArray(yyl2, d)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
|
|
||||||
var h codecSelfer1234
|
|
||||||
z, r := codec1978.GenHelperDecoder(d)
|
|
||||||
_, _, _ = h, z, r
|
|
||||||
var yys3Slc = z.DecScratchBuffer() // default slice to decode into
|
|
||||||
_ = yys3Slc
|
|
||||||
var yyhl3 bool = l >= 0
|
|
||||||
for yyj3 := 0; ; yyj3++ {
|
|
||||||
if yyhl3 {
|
|
||||||
if yyj3 >= l {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if r.CheckBreak() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
yys3Slc = r.DecodeBytes(yys3Slc, true, true)
|
|
||||||
yys3 := string(yys3Slc)
|
|
||||||
z.DecSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
switch yys3 {
|
|
||||||
case "kind":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.Kind = ""
|
|
||||||
} else {
|
|
||||||
yyv4 := &x.Kind
|
|
||||||
yym5 := z.DecBinary()
|
|
||||||
_ = yym5
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv4)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "apiVersion":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.APIVersion = ""
|
|
||||||
} else {
|
|
||||||
yyv6 := &x.APIVersion
|
|
||||||
yym7 := z.DecBinary()
|
|
||||||
_ = yym7
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv6)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "DeschedulingInterval":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.DeschedulingInterval = 0
|
|
||||||
} else {
|
|
||||||
yyv8 := &x.DeschedulingInterval
|
|
||||||
yym9 := z.DecBinary()
|
|
||||||
_ = yym9
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.DecExt(yyv8) {
|
|
||||||
} else {
|
|
||||||
*((*int64)(yyv8)) = int64(r.DecodeInt(64))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "KubeconfigFile":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.KubeconfigFile = ""
|
|
||||||
} else {
|
|
||||||
yyv10 := &x.KubeconfigFile
|
|
||||||
yym11 := z.DecBinary()
|
|
||||||
_ = yym11
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv10)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "PolicyConfigFile":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.PolicyConfigFile = ""
|
|
||||||
} else {
|
|
||||||
yyv12 := &x.PolicyConfigFile
|
|
||||||
yym13 := z.DecBinary()
|
|
||||||
_ = yym13
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv12)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "DryRun":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.DryRun = false
|
|
||||||
} else {
|
|
||||||
yyv14 := &x.DryRun
|
|
||||||
yym15 := z.DecBinary()
|
|
||||||
_ = yym15
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*bool)(yyv14)) = r.DecodeBool()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
z.DecStructFieldNotFound(-1, yys3)
|
|
||||||
} // end switch yys3
|
|
||||||
} // end for yyj3
|
|
||||||
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
|
|
||||||
var h codecSelfer1234
|
|
||||||
z, r := codec1978.GenHelperDecoder(d)
|
|
||||||
_, _, _ = h, z, r
|
|
||||||
var yyj16 int
|
|
||||||
var yyb16 bool
|
|
||||||
var yyhl16 bool = l >= 0
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.Kind = ""
|
|
||||||
} else {
|
|
||||||
yyv17 := &x.Kind
|
|
||||||
yym18 := z.DecBinary()
|
|
||||||
_ = yym18
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv17)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.APIVersion = ""
|
|
||||||
} else {
|
|
||||||
yyv19 := &x.APIVersion
|
|
||||||
yym20 := z.DecBinary()
|
|
||||||
_ = yym20
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv19)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.DeschedulingInterval = 0
|
|
||||||
} else {
|
|
||||||
yyv21 := &x.DeschedulingInterval
|
|
||||||
yym22 := z.DecBinary()
|
|
||||||
_ = yym22
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.DecExt(yyv21) {
|
|
||||||
} else {
|
|
||||||
*((*int64)(yyv21)) = int64(r.DecodeInt(64))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.KubeconfigFile = ""
|
|
||||||
} else {
|
|
||||||
yyv23 := &x.KubeconfigFile
|
|
||||||
yym24 := z.DecBinary()
|
|
||||||
_ = yym24
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv23)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.PolicyConfigFile = ""
|
|
||||||
} else {
|
|
||||||
yyv25 := &x.PolicyConfigFile
|
|
||||||
yym26 := z.DecBinary()
|
|
||||||
_ = yym26
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv25)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.DryRun = false
|
|
||||||
} else {
|
|
||||||
yyv27 := &x.DryRun
|
|
||||||
yym28 := z.DecBinary()
|
|
||||||
_ = yym28
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*bool)(yyv27)) = r.DecodeBool()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
z.DecStructFieldNotFound(yyj16-1, "")
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
}
|
|
||||||
@@ -22,6 +22,8 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
type DeschedulerConfiguration struct {
|
type DeschedulerConfiguration struct {
|
||||||
metav1.TypeMeta
|
metav1.TypeMeta
|
||||||
|
|
||||||
@@ -37,4 +39,13 @@ type DeschedulerConfiguration struct {
|
|||||||
|
|
||||||
// Dry run
|
// Dry run
|
||||||
DryRun bool
|
DryRun bool
|
||||||
|
|
||||||
|
// Node selectors
|
||||||
|
NodeSelector string
|
||||||
|
|
||||||
|
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
|
||||||
|
MaxNoOfPodsToEvictPerNode int
|
||||||
|
|
||||||
|
// EvictLocalStoragePods allows pods using local storage to be evicted.
|
||||||
|
EvictLocalStoragePods bool
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,10 +15,10 @@ limitations under the License.
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
// +k8s:deepcopy-gen=package,register
|
// +k8s:deepcopy-gen=package,register
|
||||||
// +k8s:conversion-gen=github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig
|
// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/apis/componentconfig
|
||||||
// +k8s:defaulter-gen=TypeMeta
|
// +k8s:defaulter-gen=TypeMeta
|
||||||
|
|
||||||
// Package v1alpha1 is the v1alpha1 version of the descheduler's componentconfig API
|
// Package v1alpha1 is the v1alpha1 version of the descheduler's componentconfig API
|
||||||
// +groupName=deschedulercomponentconfig
|
// +groupName=deschedulercomponentconfig
|
||||||
|
|
||||||
package v1alpha1 // import "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
|
package v1alpha1 // import "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
|
||||||
|
|||||||
@@ -1,546 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2017 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// ************************************************************
|
|
||||||
// DO NOT EDIT.
|
|
||||||
// THIS FILE IS AUTO-GENERATED BY codecgen.
|
|
||||||
// ************************************************************
|
|
||||||
|
|
||||||
package v1alpha1
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
codec1978 "github.com/ugorji/go/codec"
|
|
||||||
pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
time "time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ----- content types ----
|
|
||||||
codecSelferC_UTF81234 = 1
|
|
||||||
codecSelferC_RAW1234 = 0
|
|
||||||
// ----- value types used ----
|
|
||||||
codecSelferValueTypeArray1234 = 10
|
|
||||||
codecSelferValueTypeMap1234 = 9
|
|
||||||
// ----- containerStateValues ----
|
|
||||||
codecSelfer_containerMapKey1234 = 2
|
|
||||||
codecSelfer_containerMapValue1234 = 3
|
|
||||||
codecSelfer_containerMapEnd1234 = 4
|
|
||||||
codecSelfer_containerArrayElem1234 = 6
|
|
||||||
codecSelfer_containerArrayEnd1234 = 7
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
|
|
||||||
codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
|
|
||||||
)
|
|
||||||
|
|
||||||
type codecSelfer1234 struct{}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
if codec1978.GenVersion != 5 {
|
|
||||||
_, file, _, _ := runtime.Caller(0)
|
|
||||||
err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
|
|
||||||
5, codec1978.GenVersion, file)
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
if false { // reference the types, but skip this branch at build/run time
|
|
||||||
var v0 pkg1_v1.TypeMeta
|
|
||||||
var v1 time.Duration
|
|
||||||
_, _ = v0, v1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
|
||||||
var h codecSelfer1234
|
|
||||||
z, r := codec1978.GenHelperEncoder(e)
|
|
||||||
_, _, _ = h, z, r
|
|
||||||
if x == nil {
|
|
||||||
r.EncodeNil()
|
|
||||||
} else {
|
|
||||||
yym1 := z.EncBinary()
|
|
||||||
_ = yym1
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.EncExt(x) {
|
|
||||||
} else {
|
|
||||||
yysep2 := !z.EncBinary()
|
|
||||||
yy2arr2 := z.EncBasicHandle().StructToArray
|
|
||||||
var yyq2 [6]bool
|
|
||||||
_, _, _ = yysep2, yyq2, yy2arr2
|
|
||||||
const yyr2 bool = false
|
|
||||||
yyq2[0] = x.Kind != ""
|
|
||||||
yyq2[1] = x.APIVersion != ""
|
|
||||||
yyq2[2] = x.DeschedulingInterval != 0
|
|
||||||
yyq2[4] = x.PolicyConfigFile != ""
|
|
||||||
yyq2[5] = x.DryRun != false
|
|
||||||
var yynn2 int
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
r.EncodeArrayStart(6)
|
|
||||||
} else {
|
|
||||||
yynn2 = 1
|
|
||||||
for _, b := range yyq2 {
|
|
||||||
if b {
|
|
||||||
yynn2++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
r.EncodeMapStart(yynn2)
|
|
||||||
yynn2 = 0
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if yyq2[0] {
|
|
||||||
yym4 := z.EncBinary()
|
|
||||||
_ = yym4
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, "")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if yyq2[0] {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("kind"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym5 := z.EncBinary()
|
|
||||||
_ = yym5
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if yyq2[1] {
|
|
||||||
yym7 := z.EncBinary()
|
|
||||||
_ = yym7
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, "")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if yyq2[1] {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym8 := z.EncBinary()
|
|
||||||
_ = yym8
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if yyq2[2] {
|
|
||||||
yym10 := z.EncBinary()
|
|
||||||
_ = yym10
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.EncExt(x.DeschedulingInterval) {
|
|
||||||
} else {
|
|
||||||
r.EncodeInt(int64(x.DeschedulingInterval))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
r.EncodeInt(0)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if yyq2[2] {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("deschedulingInterval"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym11 := z.EncBinary()
|
|
||||||
_ = yym11
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.EncExt(x.DeschedulingInterval) {
|
|
||||||
} else {
|
|
||||||
r.EncodeInt(int64(x.DeschedulingInterval))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
yym13 := z.EncBinary()
|
|
||||||
_ = yym13
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigFile))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("kubeconfigFile"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym14 := z.EncBinary()
|
|
||||||
_ = yym14
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigFile))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if yyq2[4] {
|
|
||||||
yym16 := z.EncBinary()
|
|
||||||
_ = yym16
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, "")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if yyq2[4] {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("policyConfigFile"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym17 := z.EncBinary()
|
|
||||||
_ = yym17
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if yyq2[5] {
|
|
||||||
yym19 := z.EncBinary()
|
|
||||||
_ = yym19
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeBool(bool(x.DryRun))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
r.EncodeBool(false)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if yyq2[5] {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
r.EncodeString(codecSelferC_UTF81234, string("dryRun"))
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
yym20 := z.EncBinary()
|
|
||||||
_ = yym20
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
r.EncodeBool(bool(x.DryRun))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if yyr2 || yy2arr2 {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
} else {
|
|
||||||
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DeschedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
|
|
||||||
var h codecSelfer1234
|
|
||||||
z, r := codec1978.GenHelperDecoder(d)
|
|
||||||
_, _, _ = h, z, r
|
|
||||||
yym1 := z.DecBinary()
|
|
||||||
_ = yym1
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.DecExt(x) {
|
|
||||||
} else {
|
|
||||||
yyct2 := r.ContainerType()
|
|
||||||
if yyct2 == codecSelferValueTypeMap1234 {
|
|
||||||
yyl2 := r.ReadMapStart()
|
|
||||||
if yyl2 == 0 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
|
|
||||||
} else {
|
|
||||||
x.codecDecodeSelfFromMap(yyl2, d)
|
|
||||||
}
|
|
||||||
} else if yyct2 == codecSelferValueTypeArray1234 {
|
|
||||||
yyl2 := r.ReadArrayStart()
|
|
||||||
if yyl2 == 0 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
} else {
|
|
||||||
x.codecDecodeSelfFromArray(yyl2, d)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
|
|
||||||
var h codecSelfer1234
|
|
||||||
z, r := codec1978.GenHelperDecoder(d)
|
|
||||||
_, _, _ = h, z, r
|
|
||||||
var yys3Slc = z.DecScratchBuffer() // default slice to decode into
|
|
||||||
_ = yys3Slc
|
|
||||||
var yyhl3 bool = l >= 0
|
|
||||||
for yyj3 := 0; ; yyj3++ {
|
|
||||||
if yyhl3 {
|
|
||||||
if yyj3 >= l {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if r.CheckBreak() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerMapKey1234)
|
|
||||||
yys3Slc = r.DecodeBytes(yys3Slc, true, true)
|
|
||||||
yys3 := string(yys3Slc)
|
|
||||||
z.DecSendContainerState(codecSelfer_containerMapValue1234)
|
|
||||||
switch yys3 {
|
|
||||||
case "kind":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.Kind = ""
|
|
||||||
} else {
|
|
||||||
yyv4 := &x.Kind
|
|
||||||
yym5 := z.DecBinary()
|
|
||||||
_ = yym5
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv4)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "apiVersion":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.APIVersion = ""
|
|
||||||
} else {
|
|
||||||
yyv6 := &x.APIVersion
|
|
||||||
yym7 := z.DecBinary()
|
|
||||||
_ = yym7
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv6)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "deschedulingInterval":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.DeschedulingInterval = 0
|
|
||||||
} else {
|
|
||||||
yyv8 := &x.DeschedulingInterval
|
|
||||||
yym9 := z.DecBinary()
|
|
||||||
_ = yym9
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.DecExt(yyv8) {
|
|
||||||
} else {
|
|
||||||
*((*int64)(yyv8)) = int64(r.DecodeInt(64))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "kubeconfigFile":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.KubeconfigFile = ""
|
|
||||||
} else {
|
|
||||||
yyv10 := &x.KubeconfigFile
|
|
||||||
yym11 := z.DecBinary()
|
|
||||||
_ = yym11
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv10)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "policyConfigFile":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.PolicyConfigFile = ""
|
|
||||||
} else {
|
|
||||||
yyv12 := &x.PolicyConfigFile
|
|
||||||
yym13 := z.DecBinary()
|
|
||||||
_ = yym13
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv12)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "dryRun":
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.DryRun = false
|
|
||||||
} else {
|
|
||||||
yyv14 := &x.DryRun
|
|
||||||
yym15 := z.DecBinary()
|
|
||||||
_ = yym15
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*bool)(yyv14)) = r.DecodeBool()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
z.DecStructFieldNotFound(-1, yys3)
|
|
||||||
} // end switch yys3
|
|
||||||
} // end for yyj3
|
|
||||||
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
|
|
||||||
var h codecSelfer1234
|
|
||||||
z, r := codec1978.GenHelperDecoder(d)
|
|
||||||
_, _, _ = h, z, r
|
|
||||||
var yyj16 int
|
|
||||||
var yyb16 bool
|
|
||||||
var yyhl16 bool = l >= 0
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.Kind = ""
|
|
||||||
} else {
|
|
||||||
yyv17 := &x.Kind
|
|
||||||
yym18 := z.DecBinary()
|
|
||||||
_ = yym18
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv17)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.APIVersion = ""
|
|
||||||
} else {
|
|
||||||
yyv19 := &x.APIVersion
|
|
||||||
yym20 := z.DecBinary()
|
|
||||||
_ = yym20
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv19)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.DeschedulingInterval = 0
|
|
||||||
} else {
|
|
||||||
yyv21 := &x.DeschedulingInterval
|
|
||||||
yym22 := z.DecBinary()
|
|
||||||
_ = yym22
|
|
||||||
if false {
|
|
||||||
} else if z.HasExtensions() && z.DecExt(yyv21) {
|
|
||||||
} else {
|
|
||||||
*((*int64)(yyv21)) = int64(r.DecodeInt(64))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.KubeconfigFile = ""
|
|
||||||
} else {
|
|
||||||
yyv23 := &x.KubeconfigFile
|
|
||||||
yym24 := z.DecBinary()
|
|
||||||
_ = yym24
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv23)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.PolicyConfigFile = ""
|
|
||||||
} else {
|
|
||||||
yyv25 := &x.PolicyConfigFile
|
|
||||||
yym26 := z.DecBinary()
|
|
||||||
_ = yym26
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*string)(yyv25)) = r.DecodeString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
if r.TryDecodeAsNil() {
|
|
||||||
x.DryRun = false
|
|
||||||
} else {
|
|
||||||
yyv27 := &x.DryRun
|
|
||||||
yym28 := z.DecBinary()
|
|
||||||
_ = yym28
|
|
||||||
if false {
|
|
||||||
} else {
|
|
||||||
*((*bool)(yyv27)) = r.DecodeBool()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
yyj16++
|
|
||||||
if yyhl16 {
|
|
||||||
yyb16 = yyj16 > l
|
|
||||||
} else {
|
|
||||||
yyb16 = r.CheckBreak()
|
|
||||||
}
|
|
||||||
if yyb16 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
|
||||||
z.DecStructFieldNotFound(yyj16-1, "")
|
|
||||||
}
|
|
||||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
|
||||||
}
|
|
||||||
@@ -22,6 +22,8 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
type DeschedulerConfiguration struct {
|
type DeschedulerConfiguration struct {
|
||||||
metav1.TypeMeta `json:",inline"`
|
metav1.TypeMeta `json:",inline"`
|
||||||
|
|
||||||
@@ -37,4 +39,13 @@ type DeschedulerConfiguration struct {
|
|||||||
|
|
||||||
// Dry run
|
// Dry run
|
||||||
DryRun bool `json:"dryRun,omitempty"`
|
DryRun bool `json:"dryRun,omitempty"`
|
||||||
|
|
||||||
|
// Node selectors
|
||||||
|
NodeSelector string `json:"nodeSelector,omitempty"`
|
||||||
|
|
||||||
|
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
|
||||||
|
MaxNoOfPodsToEvictPerNode int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
|
||||||
|
|
||||||
|
// EvictLocalStoragePods allows pods using local storage to be evicted.
|
||||||
|
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copyright 2017 The Kubernetes Authors.
|
Copyright 2020 The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
@@ -16,28 +16,36 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// This file was autogenerated by conversion-gen. Do not edit it manually!
|
// Code generated by conversion-gen. DO NOT EDIT.
|
||||||
|
|
||||||
package v1alpha1
|
package v1alpha1
|
||||||
|
|
||||||
import (
|
import (
|
||||||
componentconfig "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
|
time "time"
|
||||||
|
|
||||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
time "time"
|
componentconfig "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
SchemeBuilder.Register(RegisterConversions)
|
localSchemeBuilder.Register(RegisterConversions)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegisterConversions adds conversion functions to the given scheme.
|
// RegisterConversions adds conversion functions to the given scheme.
|
||||||
// Public to allow building arbitrary schemes.
|
// Public to allow building arbitrary schemes.
|
||||||
func RegisterConversions(scheme *runtime.Scheme) error {
|
func RegisterConversions(s *runtime.Scheme) error {
|
||||||
return scheme.AddGeneratedConversionFuncs(
|
if err := s.AddGeneratedConversionFunc((*DeschedulerConfiguration)(nil), (*componentconfig.DeschedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
Convert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration,
|
return Convert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(a.(*DeschedulerConfiguration), b.(*componentconfig.DeschedulerConfiguration), scope)
|
||||||
Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration,
|
}); err != nil {
|
||||||
)
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*componentconfig.DeschedulerConfiguration)(nil), (*DeschedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(a.(*componentconfig.DeschedulerConfiguration), b.(*DeschedulerConfiguration), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(in *DeschedulerConfiguration, out *componentconfig.DeschedulerConfiguration, s conversion.Scope) error {
|
func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(in *DeschedulerConfiguration, out *componentconfig.DeschedulerConfiguration, s conversion.Scope) error {
|
||||||
@@ -45,6 +53,9 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
|
|||||||
out.KubeconfigFile = in.KubeconfigFile
|
out.KubeconfigFile = in.KubeconfigFile
|
||||||
out.PolicyConfigFile = in.PolicyConfigFile
|
out.PolicyConfigFile = in.PolicyConfigFile
|
||||||
out.DryRun = in.DryRun
|
out.DryRun = in.DryRun
|
||||||
|
out.NodeSelector = in.NodeSelector
|
||||||
|
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
|
||||||
|
out.EvictLocalStoragePods = in.EvictLocalStoragePods
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -58,6 +69,9 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
|
|||||||
out.KubeconfigFile = in.KubeconfigFile
|
out.KubeconfigFile = in.KubeconfigFile
|
||||||
out.PolicyConfigFile = in.PolicyConfigFile
|
out.PolicyConfigFile = in.PolicyConfigFile
|
||||||
out.DryRun = in.DryRun
|
out.DryRun = in.DryRun
|
||||||
|
out.NodeSelector = in.NodeSelector
|
||||||
|
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
|
||||||
|
out.EvictLocalStoragePods = in.EvictLocalStoragePods
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copyright 2017 The Kubernetes Authors.
|
Copyright 2020 The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
@@ -16,34 +16,35 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||||
|
|
||||||
package v1alpha1
|
package v1alpha1
|
||||||
|
|
||||||
import (
|
import (
|
||||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
|
||||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
reflect "reflect"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
SchemeBuilder.Register(RegisterDeepCopies)
|
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerConfiguration.
|
||||||
// to allow building arbitrary schemes.
|
func (in *DeschedulerConfiguration) DeepCopy() *DeschedulerConfiguration {
|
||||||
func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
if in == nil {
|
||||||
return scheme.AddGeneratedDeepCopyFuncs(
|
|
||||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_DeschedulerConfiguration, InType: reflect.TypeOf(&DeschedulerConfiguration{})},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy_v1alpha1_DeschedulerConfiguration is an autogenerated deepcopy function.
|
|
||||||
func DeepCopy_v1alpha1_DeschedulerConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error {
|
|
||||||
{
|
|
||||||
in := in.(*DeschedulerConfiguration)
|
|
||||||
out := out.(*DeschedulerConfiguration)
|
|
||||||
*out = *in
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
out := new(DeschedulerConfiguration)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copyright 2017 The Kubernetes Authors.
|
Copyright 2020 The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// This file was autogenerated by defaulter-gen. Do not edit it manually!
|
// Code generated by defaulter-gen. DO NOT EDIT.
|
||||||
|
|
||||||
package v1alpha1
|
package v1alpha1
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copyright 2017 The Kubernetes Authors.
|
Copyright 2020 The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
@@ -16,34 +16,35 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||||
|
|
||||||
package componentconfig
|
package componentconfig
|
||||||
|
|
||||||
import (
|
import (
|
||||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
|
||||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
reflect "reflect"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
SchemeBuilder.Register(RegisterDeepCopies)
|
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerConfiguration.
|
||||||
// to allow building arbitrary schemes.
|
func (in *DeschedulerConfiguration) DeepCopy() *DeschedulerConfiguration {
|
||||||
func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
if in == nil {
|
||||||
return scheme.AddGeneratedDeepCopyFuncs(
|
|
||||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_componentconfig_DeschedulerConfiguration, InType: reflect.TypeOf(&DeschedulerConfiguration{})},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy_componentconfig_DeschedulerConfiguration is an autogenerated deepcopy function.
|
|
||||||
func DeepCopy_componentconfig_DeschedulerConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error {
|
|
||||||
{
|
|
||||||
in := in.(*DeschedulerConfiguration)
|
|
||||||
out := out.(*DeschedulerConfiguration)
|
|
||||||
*out = *in
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
out := new(DeschedulerConfiguration)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,9 +19,11 @@ package client
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
// Ensure to load all auth plugins.
|
||||||
|
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||||
"k8s.io/client-go/rest"
|
"k8s.io/client-go/rest"
|
||||||
"k8s.io/client-go/tools/clientcmd"
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func CreateClient(kubeconfig string) (clientset.Interface, error) {
|
func CreateClient(kubeconfig string) (clientset.Interface, error) {
|
||||||
|
|||||||
@@ -19,15 +19,19 @@ package descheduler
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
|
"k8s.io/klog"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/descheduler/client"
|
|
||||||
eutils "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions/utils"
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/descheduler/strategies"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||||
|
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||||
|
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Run(rs *options.DeschedulerServer) error {
|
func Run(rs *options.DeschedulerServer) error {
|
||||||
|
|
||||||
rsclient, err := client.CreateClient(rs.KubeconfigFile)
|
rsclient, err := client.CreateClient(rs.KubeconfigFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -39,22 +43,42 @@ func Run(rs *options.DeschedulerServer) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if deschedulerPolicy == nil {
|
if deschedulerPolicy == nil {
|
||||||
return fmt.Errorf("\ndeschedulerPolicy is nil\n")
|
return fmt.Errorf("deschedulerPolicy is nil")
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return RunDeschedulerStrategies(rs, deschedulerPolicy)
|
||||||
|
}
|
||||||
|
|
||||||
|
func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy) error {
|
||||||
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
|
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
|
||||||
if err != nil || len(evictionPolicyGroupVersion) == 0 {
|
if err != nil || len(evictionPolicyGroupVersion) == 0 {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
stopChannel := make(chan struct{})
|
stopChannel := make(chan struct{})
|
||||||
nodes, err := nodeutil.ReadyNodes(rs.Client, stopChannel)
|
nodes, err := nodeutil.ReadyNodes(rs.Client, rs.NodeSelector, stopChannel)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes)
|
if len(nodes) <= 1 {
|
||||||
strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes)
|
klog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
nodePodCount := utils.InitializeNodePodCount(nodes)
|
||||||
|
wait.Until(func() {
|
||||||
|
strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
|
||||||
|
strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
|
||||||
|
strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
|
||||||
|
strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
|
||||||
|
strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], evictionPolicyGroupVersion, nodes, nodePodCount)
|
||||||
|
|
||||||
|
// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
|
||||||
|
if rs.DeschedulingInterval.Seconds() == 0 {
|
||||||
|
close(stopChannel)
|
||||||
|
}
|
||||||
|
}, rs.DeschedulingInterval, stopChannel)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,13 +19,17 @@ package evictions
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
policy "k8s.io/api/policy/v1beta1"
|
||||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
|
"k8s.io/client-go/kubernetes/scheme"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||||
|
"k8s.io/client-go/tools/record"
|
||||||
|
"k8s.io/klog"
|
||||||
|
|
||||||
eutils "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions/utils"
|
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
|
func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
|
||||||
@@ -45,9 +49,14 @@ func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string
|
|||||||
},
|
},
|
||||||
DeleteOptions: deleteOptions,
|
DeleteOptions: deleteOptions,
|
||||||
}
|
}
|
||||||
err := client.Policy().Evictions(eviction.Namespace).Evict(eviction)
|
err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
eventBroadcaster := record.NewBroadcaster()
|
||||||
|
eventBroadcaster.StartLogging(klog.V(3).Infof)
|
||||||
|
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: client.CoreV1().Events(pod.Namespace)})
|
||||||
|
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
|
||||||
|
r.Event(pod, v1.EventTypeNormal, "Descheduled", "pod evicted by sigs.k8s.io/descheduler")
|
||||||
return true, nil
|
return true, nil
|
||||||
} else if apierrors.IsTooManyRequests(err) {
|
} else if apierrors.IsTooManyRequests(err) {
|
||||||
return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||||
|
|||||||
@@ -17,24 +17,48 @@ limitations under the License.
|
|||||||
package evictions
|
package evictions
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/kubernetes-incubator/descheduler/test"
|
"k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
core "k8s.io/client-go/testing"
|
core "k8s.io/client-go/testing"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
"sigs.k8s.io/descheduler/test"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestEvictPod(t *testing.T) {
|
func TestEvictPod(t *testing.T) {
|
||||||
n1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
||||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
pod1 := test.BuildTestPod("p1", 400, 0, "node1")
|
||||||
fakeClient := &fake.Clientset{}
|
tests := []struct {
|
||||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
description string
|
||||||
return true, &v1.PodList{Items: []v1.Pod{*p1}}, nil
|
node *v1.Node
|
||||||
})
|
pod *v1.Pod
|
||||||
evicted, _ := EvictPod(fakeClient, p1, "v1", false)
|
pods []v1.Pod
|
||||||
if !evicted {
|
want bool
|
||||||
t.Errorf("Expected %v pod to be evicted", p1.Name)
|
}{
|
||||||
|
{
|
||||||
|
description: "test pod eviction - pod present",
|
||||||
|
node: node1,
|
||||||
|
pod: pod1,
|
||||||
|
pods: []v1.Pod{*pod1},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "test pod eviction - pod absent",
|
||||||
|
node: node1,
|
||||||
|
pod: pod1,
|
||||||
|
pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1"), *test.BuildTestPod("p3", 450, 0, "node1")},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
fakeClient := &fake.Clientset{}
|
||||||
|
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
|
return true, &v1.PodList{Items: test.pods}, nil
|
||||||
|
})
|
||||||
|
got, _ := EvictPod(fakeClient, test.pod, "v1", false)
|
||||||
|
if got != test.want {
|
||||||
|
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ limitations under the License.
|
|||||||
package utils
|
package utils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -19,32 +19,46 @@ package node
|
|||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/fields"
|
"k8s.io/apimachinery/pkg/fields"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
corelisters "k8s.io/client-go/listers/core/v1"
|
||||||
"k8s.io/client-go/tools/cache"
|
"k8s.io/client-go/tools/cache"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
"k8s.io/klog"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ReadyNodes returns ready nodes irrespective of whether they are
|
// ReadyNodes returns ready nodes irrespective of whether they are
|
||||||
// schedulable or not.
|
// schedulable or not.
|
||||||
func ReadyNodes(client clientset.Interface, stopChannel <-chan struct{}) ([]*v1.Node, error) {
|
func ReadyNodes(client clientset.Interface, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
|
||||||
nl := GetNodeLister(client, stopChannel)
|
ns, err := labels.Parse(nodeSelector)
|
||||||
nodes, err := nl.List(labels.Everything())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return []*v1.Node{}, err
|
return []*v1.Node{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var nodes []*v1.Node
|
||||||
|
nl := GetNodeLister(client, stopChannel)
|
||||||
|
if nl != nil {
|
||||||
|
// err is defined above
|
||||||
|
if nodes, err = nl.List(ns); err != nil {
|
||||||
|
return []*v1.Node{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if len(nodes) == 0 {
|
if len(nodes) == 0 {
|
||||||
var err error
|
klog.V(2).Infof("node lister returned empty list, now fetch directly")
|
||||||
nItems, err := client.Core().Nodes().List(metav1.ListOptions{})
|
|
||||||
|
nItems, err := client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return []*v1.Node{}, err
|
return []*v1.Node{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if nItems == nil || len(nItems.Items) == 0 {
|
||||||
|
return []*v1.Node{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
for i := range nItems.Items {
|
for i := range nItems.Items {
|
||||||
node := nItems.Items[i]
|
node := nItems.Items[i]
|
||||||
nodes = append(nodes, &node)
|
nodes = append(nodes, &node)
|
||||||
@@ -61,15 +75,22 @@ func ReadyNodes(client clientset.Interface, stopChannel <-chan struct{}) ([]*v1.
|
|||||||
}
|
}
|
||||||
|
|
||||||
func GetNodeLister(client clientset.Interface, stopChannel <-chan struct{}) corelisters.NodeLister {
|
func GetNodeLister(client clientset.Interface, stopChannel <-chan struct{}) corelisters.NodeLister {
|
||||||
listWatcher := cache.NewListWatchFromClient(client.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
|
if stopChannel == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
listWatcher := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
|
||||||
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
|
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
|
||||||
nodeLister := corelisters.NewNodeLister(store)
|
nodeLister := corelisters.NewNodeLister(store)
|
||||||
reflector := cache.NewReflector(listWatcher, &v1.Node{}, store, time.Hour)
|
reflector := cache.NewReflector(listWatcher, &v1.Node{}, store, time.Hour)
|
||||||
reflector.RunUntil(stopChannel)
|
go reflector.Run(stopChannel)
|
||||||
|
|
||||||
|
// To give some time so that listing works, chosen randomly
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
|
||||||
return nodeLister
|
return nodeLister
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsReady checks if the descheduler could run against given node.
|
||||||
func IsReady(node *v1.Node) bool {
|
func IsReady(node *v1.Node) bool {
|
||||||
for i := range node.Status.Conditions {
|
for i := range node.Status.Conditions {
|
||||||
cond := &node.Status.Conditions[i]
|
cond := &node.Status.Conditions[i]
|
||||||
@@ -78,20 +99,66 @@ func IsReady(node *v1.Node) bool {
|
|||||||
// - NodeOutOfDisk condition status is ConditionFalse,
|
// - NodeOutOfDisk condition status is ConditionFalse,
|
||||||
// - NodeNetworkUnavailable condition status is ConditionFalse.
|
// - NodeNetworkUnavailable condition status is ConditionFalse.
|
||||||
if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
|
if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
|
||||||
glog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
|
klog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
|
||||||
return false
|
return false
|
||||||
} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
|
} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
|
||||||
glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
|
klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
|
||||||
return false
|
return false
|
||||||
} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
|
} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
|
||||||
glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
|
klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
|
||||||
return false
|
return false
|
||||||
}*/
|
}*/
|
||||||
}
|
}
|
||||||
// Ignore nodes that are marked unschedulable
|
// Ignore nodes that are marked unschedulable
|
||||||
/*if node.Spec.Unschedulable {
|
/*if node.Spec.Unschedulable {
|
||||||
glog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)
|
klog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)
|
||||||
return false
|
return false
|
||||||
}*/
|
}*/
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsNodeUnschedulable checks if the node is unschedulable. This is helper function to check only in case of
|
||||||
|
// underutilized node so that they won't be accounted for.
|
||||||
|
func IsNodeUnschedulable(node *v1.Node) bool {
|
||||||
|
return node.Spec.Unschedulable
|
||||||
|
}
|
||||||
|
|
||||||
|
// PodFitsAnyNode checks if the given pod fits any of the given nodes, based on
|
||||||
|
// multiple criteria, like, pod node selector matching the node label, node
|
||||||
|
// being schedulable or not.
|
||||||
|
func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
|
||||||
|
for _, node := range nodes {
|
||||||
|
|
||||||
|
ok, err := utils.PodMatchNodeSelector(pod, node)
|
||||||
|
if err != nil || !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if ok {
|
||||||
|
if !IsNodeUnschedulable(node) {
|
||||||
|
klog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// PodFitsCurrentNode checks if the given pod fits on the given node if the pod
|
||||||
|
// node selector matches the node label.
|
||||||
|
func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
|
||||||
|
ok, err := utils.PodMatchNodeSelector(pod, node)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
klog.Error(err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
klog.V(1).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
klog.V(3).Infof("Pod %v fits on node %v", pod.Name, node.Name)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|||||||
@@ -17,66 +17,326 @@ limitations under the License.
|
|||||||
package node
|
package node
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/kubernetes-incubator/descheduler/test"
|
"k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
core "k8s.io/client-go/testing"
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
"sigs.k8s.io/descheduler/test"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestReadyNodes(t *testing.T) {
|
func TestReadyNodes(t *testing.T) {
|
||||||
fakeClient := &fake.Clientset{}
|
node1 := test.BuildTestNode("node2", 1000, 2000, 9)
|
||||||
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
node2 := test.BuildTestNode("node3", 1000, 2000, 9)
|
||||||
node1.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
|
node2.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}}
|
||||||
node2 := test.BuildTestNode("node2", 1000, 2000, 9)
|
node3 := test.BuildTestNode("node4", 1000, 2000, 9)
|
||||||
node3 := test.BuildTestNode("node3", 1000, 2000, 9)
|
node3.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}}
|
||||||
node3.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}}
|
node4 := test.BuildTestNode("node5", 1000, 2000, 9)
|
||||||
node4 := test.BuildTestNode("node4", 1000, 2000, 9)
|
node4.Spec.Unschedulable = true
|
||||||
node4.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}}
|
node5 := test.BuildTestNode("node6", 1000, 2000, 9)
|
||||||
node5 := test.BuildTestNode("node5", 1000, 2000, 9)
|
node5.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}
|
||||||
node5.Spec.Unschedulable = true
|
|
||||||
node6 := test.BuildTestNode("node6", 1000, 2000, 9)
|
|
||||||
node6.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}
|
|
||||||
|
|
||||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
|
||||||
getAction := action.(core.GetAction)
|
|
||||||
switch getAction.GetName() {
|
|
||||||
case node1.Name:
|
|
||||||
return true, node1, nil
|
|
||||||
case node2.Name:
|
|
||||||
return true, node2, nil
|
|
||||||
case node3.Name:
|
|
||||||
return true, node3, nil
|
|
||||||
case node4.Name:
|
|
||||||
return true, node4, nil
|
|
||||||
case node5.Name:
|
|
||||||
return true, node5, nil
|
|
||||||
case node6.Name:
|
|
||||||
return true, node6, nil
|
|
||||||
}
|
|
||||||
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
|
||||||
})
|
|
||||||
|
|
||||||
if !IsReady(node1) {
|
if !IsReady(node1) {
|
||||||
t.Errorf("Expected %v to be ready", node1.Name)
|
|
||||||
}
|
|
||||||
if !IsReady(node2) {
|
|
||||||
t.Errorf("Expected %v to be ready", node2.Name)
|
t.Errorf("Expected %v to be ready", node2.Name)
|
||||||
}
|
}
|
||||||
if !IsReady(node3) {
|
if !IsReady(node2) {
|
||||||
t.Errorf("Expected %v to be ready", node3.Name)
|
t.Errorf("Expected %v to be ready", node3.Name)
|
||||||
}
|
}
|
||||||
if !IsReady(node4) {
|
if !IsReady(node3) {
|
||||||
t.Errorf("Expected %v to be ready", node4.Name)
|
t.Errorf("Expected %v to be ready", node4.Name)
|
||||||
}
|
}
|
||||||
if !IsReady(node5) {
|
if !IsReady(node4) {
|
||||||
t.Errorf("Expected %v to be ready", node5.Name)
|
t.Errorf("Expected %v to be ready", node5.Name)
|
||||||
}
|
}
|
||||||
if IsReady(node6) {
|
if IsReady(node5) {
|
||||||
t.Errorf("Expected %v to be not ready", node6.Name)
|
t.Errorf("Expected %v to be not ready", node5.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestReadyNodesWithNodeSelector(t *testing.T) {
|
||||||
|
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
||||||
|
node1.Labels = map[string]string{"type": "compute"}
|
||||||
|
node2 := test.BuildTestNode("node2", 1000, 2000, 9)
|
||||||
|
node2.Labels = map[string]string{"type": "infra"}
|
||||||
|
|
||||||
|
fakeClient := fake.NewSimpleClientset(node1, node2)
|
||||||
|
nodeSelector := "type=compute"
|
||||||
|
nodes, _ := ReadyNodes(fakeClient, nodeSelector, nil)
|
||||||
|
|
||||||
|
if nodes[0].Name != "node1" {
|
||||||
|
t.Errorf("Expected node1, got %s", nodes[0].Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsNodeUnschedulable(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
description string
|
||||||
|
node *v1.Node
|
||||||
|
IsUnSchedulable bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
description: "Node is expected to be schedulable",
|
||||||
|
node: &v1.Node{
|
||||||
|
Spec: v1.NodeSpec{Unschedulable: false},
|
||||||
|
},
|
||||||
|
IsUnSchedulable: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Node is not expected to be schedulable because of unschedulable field",
|
||||||
|
node: &v1.Node{
|
||||||
|
Spec: v1.NodeSpec{Unschedulable: true},
|
||||||
|
},
|
||||||
|
IsUnSchedulable: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, test := range tests {
|
||||||
|
actualUnSchedulable := IsNodeUnschedulable(test.node)
|
||||||
|
if actualUnSchedulable != test.IsUnSchedulable {
|
||||||
|
t.Errorf("Test %#v failed", test.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPodFitsCurrentNode(t *testing.T) {
|
||||||
|
|
||||||
|
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||||
|
nodeLabelValue := "yes"
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
description string
|
||||||
|
pod *v1.Pod
|
||||||
|
node *v1.Node
|
||||||
|
success bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
description: "Pod with nodeAffinity set, expected to fit the node",
|
||||||
|
pod: &v1.Pod{
|
||||||
|
Spec: v1.PodSpec{
|
||||||
|
Affinity: &v1.Affinity{
|
||||||
|
NodeAffinity: &v1.NodeAffinity{
|
||||||
|
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||||
|
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||||
|
{
|
||||||
|
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: nodeLabelKey,
|
||||||
|
Operator: "In",
|
||||||
|
Values: []string{
|
||||||
|
nodeLabelValue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
node: &v1.Node{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
success: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Pod with nodeAffinity set, not expected to fit the node",
|
||||||
|
pod: &v1.Pod{
|
||||||
|
Spec: v1.PodSpec{
|
||||||
|
Affinity: &v1.Affinity{
|
||||||
|
NodeAffinity: &v1.NodeAffinity{
|
||||||
|
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||||
|
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||||
|
{
|
||||||
|
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: nodeLabelKey,
|
||||||
|
Operator: "In",
|
||||||
|
Values: []string{
|
||||||
|
nodeLabelValue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
node: &v1.Node{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
nodeLabelKey: "no",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
success: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
actual := PodFitsCurrentNode(tc.pod, tc.node)
|
||||||
|
if actual != tc.success {
|
||||||
|
t.Errorf("Test %#v failed", tc.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPodFitsAnyNode(t *testing.T) {
|
||||||
|
|
||||||
|
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||||
|
nodeLabelValue := "yes"
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
description string
|
||||||
|
pod *v1.Pod
|
||||||
|
nodes []*v1.Node
|
||||||
|
success bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
description: "Pod expected to fit one of the nodes",
|
||||||
|
pod: &v1.Pod{
|
||||||
|
Spec: v1.PodSpec{
|
||||||
|
Affinity: &v1.Affinity{
|
||||||
|
NodeAffinity: &v1.NodeAffinity{
|
||||||
|
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||||
|
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||||
|
{
|
||||||
|
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: nodeLabelKey,
|
||||||
|
Operator: "In",
|
||||||
|
Values: []string{
|
||||||
|
nodeLabelValue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
nodeLabelKey: "no",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
success: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Pod expected to fit none of the nodes",
|
||||||
|
pod: &v1.Pod{
|
||||||
|
Spec: v1.PodSpec{
|
||||||
|
Affinity: &v1.Affinity{
|
||||||
|
NodeAffinity: &v1.NodeAffinity{
|
||||||
|
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||||
|
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||||
|
{
|
||||||
|
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: nodeLabelKey,
|
||||||
|
Operator: "In",
|
||||||
|
Values: []string{
|
||||||
|
nodeLabelValue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
nodeLabelKey: "unfit1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
nodeLabelKey: "unfit2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
success: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Nodes are unschedulable but labels match, should fail",
|
||||||
|
pod: &v1.Pod{
|
||||||
|
Spec: v1.PodSpec{
|
||||||
|
Affinity: &v1.Affinity{
|
||||||
|
NodeAffinity: &v1.NodeAffinity{
|
||||||
|
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||||
|
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||||
|
{
|
||||||
|
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: nodeLabelKey,
|
||||||
|
Operator: "In",
|
||||||
|
Values: []string{
|
||||||
|
nodeLabelValue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Spec: v1.NodeSpec{
|
||||||
|
Unschedulable: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
nodeLabelKey: "no",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
success: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
actual := PodFitsAnyNode(tc.pod, tc.nodes)
|
||||||
|
if actual != tc.success {
|
||||||
|
t.Errorf("Test %#v failed", tc.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -17,19 +17,51 @@ limitations under the License.
|
|||||||
package pod
|
package pod
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/fields"
|
"k8s.io/apimachinery/pkg/fields"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
|
||||||
"k8s.io/kubernetes/pkg/api/v1/helper/qos"
|
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
|
||||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsEvictable checks if a pod is evictable or not.
|
||||||
|
func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
|
||||||
|
ownerRefList := OwnerRef(pod)
|
||||||
|
if !HaveEvictAnnotation(pod) && (IsMirrorPod(pod) || (!evictLocalStoragePods && IsPodWithLocalStorage(pod)) || len(ownerRefList) == 0 || IsDaemonsetPod(ownerRefList) || IsCriticalPod(pod)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListEvictablePodsOnNode returns the list of evictable pods on node.
|
||||||
|
func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
|
||||||
|
pods, err := ListPodsOnANode(client, node)
|
||||||
|
if err != nil {
|
||||||
|
return []*v1.Pod{}, err
|
||||||
|
}
|
||||||
|
evictablePods := make([]*v1.Pod, 0)
|
||||||
|
for _, pod := range pods {
|
||||||
|
if !IsEvictable(pod, evictLocalStoragePods) {
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
evictablePods = append(evictablePods, pod)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return evictablePods, nil
|
||||||
|
}
|
||||||
|
|
||||||
func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
|
func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
|
||||||
|
fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
|
||||||
|
if err != nil {
|
||||||
|
return []*v1.Pod{}, err
|
||||||
|
}
|
||||||
|
|
||||||
podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(
|
podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(
|
||||||
metav1.ListOptions{FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}).String()})
|
metav1.ListOptions{FieldSelector: fieldSelector.String()})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return []*v1.Pod{}, err
|
return []*v1.Pod{}, err
|
||||||
}
|
}
|
||||||
@@ -38,36 +70,42 @@ func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, erro
|
|||||||
for i := range podList.Items {
|
for i := range podList.Items {
|
||||||
pods = append(pods, &podList.Items[i])
|
pods = append(pods, &podList.Items[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
return pods, nil
|
return pods, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsCriticalPod(pod *v1.Pod) bool {
|
func IsCriticalPod(pod *v1.Pod) bool {
|
||||||
return types.IsCriticalPod(pod)
|
return utils.IsCriticalPod(pod)
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsBestEffortPod(pod *v1.Pod) bool {
|
func IsBestEffortPod(pod *v1.Pod) bool {
|
||||||
return qos.GetPodQOS(pod) == v1.PodQOSBestEffort
|
return utils.GetPodQOS(pod) == v1.PodQOSBestEffort
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsBurstablePod(pod *v1.Pod) bool {
|
func IsBurstablePod(pod *v1.Pod) bool {
|
||||||
return qos.GetPodQOS(pod) == v1.PodQOSBurstable
|
return utils.GetPodQOS(pod) == v1.PodQOSBurstable
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsGuaranteedPod(pod *v1.Pod) bool {
|
func IsGuaranteedPod(pod *v1.Pod) bool {
|
||||||
return qos.GetPodQOS(pod) == v1.PodQOSGuaranteed
|
return utils.GetPodQOS(pod) == v1.PodQOSGuaranteed
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsDaemonsetPod(sr *v1.SerializedReference) bool {
|
func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
|
||||||
if sr != nil {
|
for _, ownerRef := range ownerRefList {
|
||||||
return sr.Reference.Kind == "DaemonSet"
|
if ownerRef.Kind == "DaemonSet" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsMirrorPod checks whether the pod is a mirror pod.
|
// IsMirrorPod checks whether the pod is a mirror pod.
|
||||||
func IsMirrorPod(pod *v1.Pod) bool {
|
func IsMirrorPod(pod *v1.Pod) bool {
|
||||||
_, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey]
|
return utils.IsMirrorPod(pod)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HaveEvictAnnotation checks if the pod have evict annotation
|
||||||
|
func HaveEvictAnnotation(pod *v1.Pod) bool {
|
||||||
|
_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
|
||||||
return found
|
return found
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -81,15 +119,7 @@ func IsPodWithLocalStorage(pod *v1.Pod) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreatorRef returns the kind of the creator reference of the pod.
|
// OwnerRef returns the ownerRefList for the pod.
|
||||||
func CreatorRef(pod *v1.Pod) (*v1.SerializedReference, error) {
|
func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
|
||||||
creatorRef, found := pod.ObjectMeta.Annotations[v1.CreatedByAnnotation]
|
return pod.ObjectMeta.GetOwnerReferences()
|
||||||
if !found {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
var sr v1.SerializedReference
|
|
||||||
if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(creatorRef), &sr); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &sr, nil
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,11 +19,165 @@ package pod
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/kubernetes-incubator/descheduler/test"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
"sigs.k8s.io/descheduler/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestIsEvictable(t *testing.T) {
|
||||||
|
n1 := test.BuildTestNode("node1", 1000, 2000, 13)
|
||||||
|
type testCase struct {
|
||||||
|
pod *v1.Pod
|
||||||
|
runBefore func(*v1.Pod)
|
||||||
|
evictLocalStoragePods bool
|
||||||
|
result bool
|
||||||
|
}
|
||||||
|
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
pod: test.BuildTestPod("p1", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p2", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p3", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p4", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p5", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.Volumes = []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "sample",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||||
|
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||||
|
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p6", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.Volumes = []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "sample",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||||
|
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||||
|
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: true,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p7", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.Volumes = []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "sample",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||||
|
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||||
|
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p8", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p9", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p10", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p11", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||||
|
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p12", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
priority := utils.SystemCriticalPriority
|
||||||
|
pod.Spec.Priority = &priority
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
pod: test.BuildTestPod("p13", 400, 0, n1.Name),
|
||||||
|
runBefore: func(pod *v1.Pod) {
|
||||||
|
priority := utils.SystemCriticalPriority
|
||||||
|
pod.Spec.Priority = &priority
|
||||||
|
pod.Annotations = map[string]string{
|
||||||
|
"descheduler.alpha.kubernetes.io/evict": "true",
|
||||||
|
}
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
result: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range testCases {
|
||||||
|
test.runBefore(test.pod)
|
||||||
|
result := IsEvictable(test.pod, test.evictLocalStoragePods)
|
||||||
|
if result != test.result {
|
||||||
|
t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pod.Name, test.result, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
func TestPodTypes(t *testing.T) {
|
func TestPodTypes(t *testing.T) {
|
||||||
n1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
n1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
||||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||||
@@ -33,13 +187,17 @@ func TestPodTypes(t *testing.T) {
|
|||||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||||
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
|
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
|
||||||
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
|
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
|
||||||
|
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
|
||||||
|
|
||||||
p1.Annotations = test.GetReplicaSetAnnotation()
|
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
|
||||||
|
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
// The following 4 pods won't get evicted.
|
// The following 4 pods won't get evicted.
|
||||||
// A daemonset.
|
// A daemonset.
|
||||||
p2.Annotations = test.GetDaemonSetAnnotation()
|
//p2.Annotations = test.GetDaemonSetAnnotation()
|
||||||
|
p2.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||||
// A pod with local storage.
|
// A pod with local storage.
|
||||||
p3.Annotations = test.GetNormalPodAnnotation()
|
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
p3.Spec.Volumes = []v1.Volume{
|
p3.Spec.Volumes = []v1.Volume{
|
||||||
{
|
{
|
||||||
Name: "sample",
|
Name: "sample",
|
||||||
@@ -54,7 +212,10 @@ func TestPodTypes(t *testing.T) {
|
|||||||
p4.Annotations = test.GetMirrorPodAnnotation()
|
p4.Annotations = test.GetMirrorPodAnnotation()
|
||||||
// A Critical Pod.
|
// A Critical Pod.
|
||||||
p5.Namespace = "kube-system"
|
p5.Namespace = "kube-system"
|
||||||
p5.Annotations = test.GetCriticalPodAnnotation()
|
priority := utils.SystemCriticalPriority
|
||||||
|
p5.Spec.Priority = &priority
|
||||||
|
systemCriticalPriority := utils.SystemCriticalPriority
|
||||||
|
p5.Spec.Priority = &systemCriticalPriority
|
||||||
if !IsMirrorPod(p4) {
|
if !IsMirrorPod(p4) {
|
||||||
t.Errorf("Expected p4 to be a mirror pod.")
|
t.Errorf("Expected p4 to be a mirror pod.")
|
||||||
}
|
}
|
||||||
@@ -64,12 +225,12 @@ func TestPodTypes(t *testing.T) {
|
|||||||
if !IsPodWithLocalStorage(p3) {
|
if !IsPodWithLocalStorage(p3) {
|
||||||
t.Errorf("Expected p3 to be a pod with local storage.")
|
t.Errorf("Expected p3 to be a pod with local storage.")
|
||||||
}
|
}
|
||||||
sr, _ := CreatorRef(p2)
|
ownerRefList := OwnerRef(p2)
|
||||||
if !IsDaemonsetPod(sr) {
|
if !IsDaemonsetPod(ownerRefList) {
|
||||||
t.Errorf("Expected p2 to be a daemonset pod.")
|
t.Errorf("Expected p2 to be a daemonset pod.")
|
||||||
}
|
}
|
||||||
sr, _ = CreatorRef(p1)
|
ownerRefList = OwnerRef(p1)
|
||||||
if IsDaemonsetPod(sr) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
|
if IsDaemonsetPod(ownerRefList) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
|
||||||
t.Errorf("Expected p1 to be a normal pod.")
|
t.Errorf("Expected p1 to be a normal pod.")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -21,17 +21,16 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/klog"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||||
_ "github.com/kubernetes-incubator/descheduler/pkg/api/install"
|
"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
|
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
|
func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
|
||||||
if policyConfigFile == "" {
|
if policyConfigFile == "" {
|
||||||
glog.V(1).Infof("policy config file not specified")
|
klog.V(1).Infof("policy config file not specified")
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -17,17 +17,11 @@ limitations under the License.
|
|||||||
package scheme
|
package scheme
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/apimachinery/announced"
|
|
||||||
"k8s.io/apimachinery/pkg/apimachinery/registered"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry)
|
Scheme = runtime.NewScheme()
|
||||||
Registry = registered.NewOrDie(os.Getenv("DESCHEDULER_API_VERSIONS"))
|
Codecs = serializer.NewCodecFactory(Scheme)
|
||||||
Scheme = runtime.NewScheme()
|
|
||||||
Codecs = serializer.NewCodecFactory(Scheme)
|
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -17,17 +17,17 @@ limitations under the License.
|
|||||||
package strategies
|
package strategies
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/golang/glog"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
"k8s.io/api/core/v1"
|
||||||
//TODO: Change to client-go instead of generated clientset.
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
"k8s.io/klog"
|
||||||
|
|
||||||
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
|
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||||
podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
//type creator string
|
//type creator string
|
||||||
@@ -36,41 +36,45 @@ type DuplicatePodsMap map[string][]*v1.Pod
|
|||||||
// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
|
// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
|
||||||
// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
|
// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
|
||||||
// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
|
// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
|
||||||
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
|
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount) {
|
||||||
if !strategy.Enabled {
|
if !strategy.Enabled {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun)
|
deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
|
||||||
}
|
}
|
||||||
|
|
||||||
// deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
|
// deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
|
||||||
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
|
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
|
||||||
podsEvicted := 0
|
podsEvicted := 0
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
glog.V(1).Infof("Processing node: %#v", node.Name)
|
klog.V(1).Infof("Processing node: %#v", node.Name)
|
||||||
dpm := ListDuplicatePodsOnANode(client, node)
|
dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods)
|
||||||
for creator, pods := range dpm {
|
for creator, pods := range dpm {
|
||||||
if len(pods) > 1 {
|
if len(pods) > 1 {
|
||||||
glog.V(1).Infof("%#v", creator)
|
klog.V(1).Infof("%#v", creator)
|
||||||
// i = 0 does not evict the first pod
|
// i = 0 does not evict the first pod
|
||||||
for i := 1; i < len(pods); i++ {
|
for i := 1; i < len(pods); i++ {
|
||||||
|
if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
|
||||||
|
break
|
||||||
|
}
|
||||||
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
|
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
|
||||||
if !success {
|
if !success {
|
||||||
glog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
|
klog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
|
||||||
} else {
|
} else {
|
||||||
podsEvicted++
|
nodepodCount[node]++
|
||||||
glog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
|
klog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
podsEvicted += nodepodCount[node]
|
||||||
}
|
}
|
||||||
return podsEvicted
|
return podsEvicted
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListDuplicatePodsOnANode lists duplicate pods on a given node.
|
// ListDuplicatePodsOnANode lists duplicate pods on a given node.
|
||||||
func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) DuplicatePodsMap {
|
func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) DuplicatePodsMap {
|
||||||
pods, err := podutil.ListPodsOnANode(client, node)
|
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -80,16 +84,14 @@ func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) Duplica
|
|||||||
// FindDuplicatePods takes a list of pods and returns a duplicatePodsMap.
|
// FindDuplicatePods takes a list of pods and returns a duplicatePodsMap.
|
||||||
func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
|
func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
|
||||||
dpm := DuplicatePodsMap{}
|
dpm := DuplicatePodsMap{}
|
||||||
|
// Ignoring the error here as in the ListDuplicatePodsOnNode function we call ListEvictablePodsOnNode which checks for error.
|
||||||
for _, pod := range pods {
|
for _, pod := range pods {
|
||||||
sr, err := podutil.CreatorRef(pod)
|
ownerRefList := podutil.OwnerRef(pod)
|
||||||
if err != nil || sr == nil {
|
for _, ownerRef := range ownerRefList {
|
||||||
continue
|
// Namespace/Kind/Name should be unique for the cluster.
|
||||||
|
s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name}, "/")
|
||||||
|
dpm[s] = append(dpm[s], pod)
|
||||||
}
|
}
|
||||||
if podutil.IsMirrorPod(pod) || podutil.IsDaemonsetPod(sr) || podutil.IsPodWithLocalStorage(pod) || podutil.IsCriticalPod(pod) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s := strings.Join([]string{sr.Reference.Kind, sr.Reference.Namespace, sr.Reference.Name}, "/")
|
|
||||||
dpm[s] = append(dpm[s], pod)
|
|
||||||
}
|
}
|
||||||
return dpm
|
return dpm
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,35 +19,57 @@ package strategies
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/kubernetes-incubator/descheduler/test"
|
"k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
core "k8s.io/client-go/testing"
|
core "k8s.io/client-go/testing"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
"sigs.k8s.io/descheduler/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
//TODO:@ravisantoshgudimetla This could be made table driven.
|
|
||||||
func TestFindDuplicatePods(t *testing.T) {
|
func TestFindDuplicatePods(t *testing.T) {
|
||||||
|
// first setup pods
|
||||||
node := test.BuildTestNode("n1", 2000, 3000, 10)
|
node := test.BuildTestNode("n1", 2000, 3000, 10)
|
||||||
p1 := test.BuildTestPod("p1", 100, 0, node.Name)
|
p1 := test.BuildTestPod("p1", 100, 0, node.Name)
|
||||||
|
p1.Namespace = "dev"
|
||||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name)
|
p2 := test.BuildTestPod("p2", 100, 0, node.Name)
|
||||||
|
p2.Namespace = "dev"
|
||||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name)
|
p3 := test.BuildTestPod("p3", 100, 0, node.Name)
|
||||||
|
p3.Namespace = "dev"
|
||||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name)
|
p4 := test.BuildTestPod("p4", 100, 0, node.Name)
|
||||||
p5 := test.BuildTestPod("p5", 100, 0, node.Name)
|
p5 := test.BuildTestPod("p5", 100, 0, node.Name)
|
||||||
p6 := test.BuildTestPod("p6", 100, 0, node.Name)
|
p6 := test.BuildTestPod("p6", 100, 0, node.Name)
|
||||||
p7 := test.BuildTestPod("p7", 100, 0, node.Name)
|
p7 := test.BuildTestPod("p7", 100, 0, node.Name)
|
||||||
|
p7.Namespace = "kube-system"
|
||||||
|
p8 := test.BuildTestPod("p8", 100, 0, node.Name)
|
||||||
|
p8.Namespace = "test"
|
||||||
|
p9 := test.BuildTestPod("p9", 100, 0, node.Name)
|
||||||
|
p9.Namespace = "test"
|
||||||
|
p10 := test.BuildTestPod("p10", 100, 0, node.Name)
|
||||||
|
p10.Namespace = "test"
|
||||||
|
|
||||||
// All the following pods expect for one will be evicted.
|
// ### Evictable Pods ###
|
||||||
p1.Annotations = test.GetReplicaSetAnnotation()
|
|
||||||
p2.Annotations = test.GetReplicaSetAnnotation()
|
|
||||||
p3.Annotations = test.GetReplicaSetAnnotation()
|
|
||||||
|
|
||||||
// The following 4 pods won't get evicted.
|
// Three Pods in the "default" Namespace, bound to same ReplicaSet. 2 should be evicted.
|
||||||
// A daemonset.
|
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||||
p4.Annotations = test.GetDaemonSetAnnotation()
|
p1.ObjectMeta.OwnerReferences = ownerRef1
|
||||||
// A pod with local storage.
|
p2.ObjectMeta.OwnerReferences = ownerRef1
|
||||||
p5.Annotations = test.GetNormalPodAnnotation()
|
p3.ObjectMeta.OwnerReferences = ownerRef1
|
||||||
|
|
||||||
|
// Three Pods in the "test" Namespace, bound to same ReplicaSet. 2 should be evicted.
|
||||||
|
ownerRef2 := test.GetReplicaSetOwnerRefList()
|
||||||
|
p8.ObjectMeta.OwnerReferences = ownerRef2
|
||||||
|
p9.ObjectMeta.OwnerReferences = ownerRef2
|
||||||
|
p10.ObjectMeta.OwnerReferences = ownerRef2
|
||||||
|
|
||||||
|
// ### Non-evictable Pods ###
|
||||||
|
|
||||||
|
// A DaemonSet.
|
||||||
|
p4.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||||
|
|
||||||
|
// A Pod with local storage.
|
||||||
|
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
p5.Spec.Volumes = []v1.Volume{
|
p5.Spec.Volumes = []v1.Volume{
|
||||||
{
|
{
|
||||||
Name: "sample",
|
Name: "sample",
|
||||||
@@ -58,22 +80,67 @@ func TestFindDuplicatePods(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// A Mirror Pod.
|
// A Mirror Pod.
|
||||||
p6.Annotations = test.GetMirrorPodAnnotation()
|
p6.Annotations = test.GetMirrorPodAnnotation()
|
||||||
|
|
||||||
// A Critical Pod.
|
// A Critical Pod.
|
||||||
p7.Namespace = "kube-system"
|
priority := utils.SystemCriticalPriority
|
||||||
p7.Annotations = test.GetCriticalPodAnnotation()
|
p7.Spec.Priority = &priority
|
||||||
expectedEvictedPodCount := 2
|
|
||||||
fakeClient := &fake.Clientset{}
|
testCases := []struct {
|
||||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
description string
|
||||||
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7}}, nil
|
maxPodsToEvict int
|
||||||
})
|
pods []v1.Pod
|
||||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
expectedEvictedPodCount int
|
||||||
return true, node, nil
|
}{
|
||||||
})
|
{
|
||||||
podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false)
|
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 2 should be evicted.",
|
||||||
if podsEvicted != expectedEvictedPodCount {
|
maxPodsToEvict: 5,
|
||||||
t.Errorf("Unexpected no of pods evicted")
|
pods: []v1.Pod{*p1, *p2, *p3},
|
||||||
|
expectedEvictedPodCount: 2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 2 should be evicted.",
|
||||||
|
maxPodsToEvict: 5,
|
||||||
|
pods: []v1.Pod{*p8, *p9, *p10},
|
||||||
|
expectedEvictedPodCount: 2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
|
||||||
|
maxPodsToEvict: 5,
|
||||||
|
pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
|
||||||
|
expectedEvictedPodCount: 4,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
|
||||||
|
maxPodsToEvict: 2,
|
||||||
|
pods: []v1.Pod{*p4, *p5, *p6, *p7},
|
||||||
|
expectedEvictedPodCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Test all Pods: 4 should be evicted.",
|
||||||
|
maxPodsToEvict: 5,
|
||||||
|
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
|
||||||
|
expectedEvictedPodCount: 4,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, testCase := range testCases {
|
||||||
|
|
||||||
|
npe := utils.NodePodEvictedCount{}
|
||||||
|
npe[node] = 0
|
||||||
|
fakeClient := &fake.Clientset{}
|
||||||
|
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
|
return true, &v1.PodList{Items: testCase.pods}, nil
|
||||||
|
})
|
||||||
|
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
|
return true, node, nil
|
||||||
|
})
|
||||||
|
podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, testCase.maxPodsToEvict, false)
|
||||||
|
if podsEvicted != testCase.expectedEvictedPodCount {
|
||||||
|
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,29 +19,31 @@ package strategies
|
|||||||
import (
|
import (
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
helper "k8s.io/kubernetes/pkg/api/v1/resource"
|
"k8s.io/klog"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
|
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/api"
|
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
type NodeUsageMap struct {
|
type NodeUsageMap struct {
|
||||||
node *v1.Node
|
node *v1.Node
|
||||||
usage api.ResourceThresholds
|
usage api.ResourceThresholds
|
||||||
|
allPods []*v1.Pod
|
||||||
nonRemovablePods []*v1.Pod
|
nonRemovablePods []*v1.Pod
|
||||||
bePods []*v1.Pod
|
bePods []*v1.Pod
|
||||||
bPods []*v1.Pod
|
bPods []*v1.Pod
|
||||||
gPods []*v1.Pod
|
gPods []*v1.Pod
|
||||||
}
|
}
|
||||||
|
|
||||||
type NodePodsMap map[*v1.Node][]*v1.Pod
|
type NodePodsMap map[*v1.Node][]*v1.Pod
|
||||||
|
|
||||||
func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
|
func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount) {
|
||||||
if !strategy.Enabled {
|
if !strategy.Enabled {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -57,82 +59,108 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
npm := CreateNodePodsMap(ds.Client, nodes)
|
npm := createNodePodsMap(ds.Client, nodes)
|
||||||
lowNodes, targetNodes, _ := classifyNodes(npm, thresholds, targetThresholds)
|
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, ds.EvictLocalStoragePods)
|
||||||
|
|
||||||
|
klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
|
||||||
|
thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
|
||||||
|
|
||||||
if len(lowNodes) == 0 {
|
if len(lowNodes) == 0 {
|
||||||
glog.V(1).Infof("No node is underutilized")
|
klog.V(1).Infof("No node is underutilized, nothing to do here, you might tune your thresholds further")
|
||||||
return
|
|
||||||
} else if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
|
|
||||||
glog.V(1).Infof("number of nodes underutilized is less than NumberOfNodes")
|
|
||||||
return
|
|
||||||
} else if len(lowNodes) == len(nodes) {
|
|
||||||
glog.V(1).Infof("all nodes are underutilized")
|
|
||||||
return
|
|
||||||
} else if len(targetNodes) == 0 {
|
|
||||||
glog.V(1).Infof("no node is above target utilization")
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun)
|
klog.V(1).Infof("Total number of underutilized nodes: %v", len(lowNodes))
|
||||||
|
|
||||||
|
if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
|
||||||
|
klog.V(1).Infof("number of nodes underutilized (%v) is less than NumberOfNodes (%v), nothing to do here", len(lowNodes), strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(lowNodes) == len(nodes) {
|
||||||
|
klog.V(1).Infof("all nodes are underutilized, nothing to do here")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(targetNodes) == 0 {
|
||||||
|
klog.V(1).Infof("all nodes are under target utilization, nothing to do here")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
klog.V(1).Infof("Criteria for a node above target utilization: CPU: %v, Mem: %v, Pods: %v",
|
||||||
|
targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
|
||||||
|
klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
|
||||||
|
|
||||||
|
totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun, ds.MaxNoOfPodsToEvictPerNode, nodepodCount)
|
||||||
|
klog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateThresholds(thresholds api.ResourceThresholds) bool {
|
func validateThresholds(thresholds api.ResourceThresholds) bool {
|
||||||
if thresholds == nil {
|
if thresholds == nil || len(thresholds) == 0 {
|
||||||
glog.V(1).Infof("no resource threshold is configured")
|
klog.V(1).Infof("no resource threshold is configured")
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
found := false
|
|
||||||
for name := range thresholds {
|
for name := range thresholds {
|
||||||
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
|
switch name {
|
||||||
found = true
|
case v1.ResourceCPU:
|
||||||
break
|
continue
|
||||||
|
case v1.ResourceMemory:
|
||||||
|
continue
|
||||||
|
case v1.ResourcePods:
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
klog.Errorf("only cpu, memory, or pods thresholds can be specified")
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !found {
|
return true
|
||||||
glog.V(1).Infof("one of cpu, memory, or pods resource threshold must be configured")
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return found
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//This function could be merged into above once we are clear.
|
//This function could be merged into above once we are clear.
|
||||||
func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
|
func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
|
||||||
if targetThresholds == nil {
|
if targetThresholds == nil {
|
||||||
glog.V(1).Infof("no target resource threshold is configured")
|
klog.V(1).Infof("no target resource threshold is configured")
|
||||||
return false
|
return false
|
||||||
} else if _, ok := targetThresholds[v1.ResourcePods]; !ok {
|
} else if _, ok := targetThresholds[v1.ResourcePods]; !ok {
|
||||||
glog.V(1).Infof("no target resource threshold for pods is configured")
|
klog.V(1).Infof("no target resource threshold for pods is configured")
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap, []NodeUsageMap) {
|
// classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
|
||||||
lowNodes, targetNodes, otherNodes := []NodeUsageMap{}, []NodeUsageMap{}, []NodeUsageMap{}
|
// low and high thresholds, it is simply ignored.
|
||||||
|
func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds, evictLocalStoragePods bool) ([]NodeUsageMap, []NodeUsageMap) {
|
||||||
|
lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
|
||||||
for node, pods := range npm {
|
for node, pods := range npm {
|
||||||
usage, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods)
|
usage, allPods, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods, evictLocalStoragePods)
|
||||||
nuMap := NodeUsageMap{node, usage, nonRemovablePods, bePods, bPods, gPods}
|
nuMap := NodeUsageMap{node, usage, allPods, nonRemovablePods, bePods, bPods, gPods}
|
||||||
glog.V(1).Infof("Node %#v usage: %#v", node.Name, usage)
|
|
||||||
|
|
||||||
if IsNodeWithLowUtilization(usage, thresholds) {
|
// Check if node is underutilized and if we can schedule pods on it.
|
||||||
|
if !nodeutil.IsNodeUnschedulable(node) && IsNodeWithLowUtilization(usage, thresholds) {
|
||||||
|
klog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
|
||||||
lowNodes = append(lowNodes, nuMap)
|
lowNodes = append(lowNodes, nuMap)
|
||||||
} else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
|
} else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
|
||||||
|
klog.V(2).Infof("Node %#v is over utilized with usage: %#v", node.Name, usage)
|
||||||
targetNodes = append(targetNodes, nuMap)
|
targetNodes = append(targetNodes, nuMap)
|
||||||
} else {
|
} else {
|
||||||
// Seems we don't need to collect them?
|
klog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)
|
||||||
otherNodes = append(otherNodes, nuMap)
|
|
||||||
}
|
}
|
||||||
|
klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bePods:%v, bPods:%v, gPods:%v", len(allPods), len(nonRemovablePods), len(bePods), len(bPods), len(gPods))
|
||||||
}
|
}
|
||||||
return lowNodes, targetNodes, otherNodes
|
return lowNodes, targetNodes
|
||||||
}
|
}
|
||||||
|
|
||||||
func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool) int {
|
// evictPodsFromTargetNodes evicts pods based on priority, if all the pods on the node have priority, if not
|
||||||
|
// evicts them based on QoS as fallback option.
|
||||||
|
// TODO: @ravig Break this function into smaller functions.
|
||||||
|
func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount utils.NodePodEvictedCount) int {
|
||||||
podsEvicted := 0
|
podsEvicted := 0
|
||||||
|
|
||||||
SortNodesByUsage(targetNodes)
|
SortNodesByUsage(targetNodes)
|
||||||
|
|
||||||
// upper bound on total number of pods/cpu/memory to be moved
|
// upper bound on total number of pods/cpu/memory to be moved
|
||||||
var totalPods, totalCpu, totalMem float64
|
var totalPods, totalCPU, totalMem float64
|
||||||
for _, node := range lowNodes {
|
for _, node := range lowNodes {
|
||||||
nodeCapacity := node.node.Status.Capacity
|
nodeCapacity := node.node.Status.Capacity
|
||||||
if len(node.node.Status.Allocatable) > 0 {
|
if len(node.node.Status.Allocatable) > 0 {
|
||||||
@@ -145,7 +173,7 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
|
|||||||
// totalCPU capacity to be moved
|
// totalCPU capacity to be moved
|
||||||
if _, ok := targetThresholds[v1.ResourceCPU]; ok {
|
if _, ok := targetThresholds[v1.ResourceCPU]; ok {
|
||||||
cpuPercentage := targetThresholds[v1.ResourceCPU] - node.usage[v1.ResourceCPU]
|
cpuPercentage := targetThresholds[v1.ResourceCPU] - node.usage[v1.ResourceCPU]
|
||||||
totalCpu += ((float64(cpuPercentage) * float64(nodeCapacity.Cpu().MilliValue())) / 100)
|
totalCPU += ((float64(cpuPercentage) * float64(nodeCapacity.Cpu().MilliValue())) / 100)
|
||||||
}
|
}
|
||||||
|
|
||||||
// totalMem capacity to be moved
|
// totalMem capacity to be moved
|
||||||
@@ -155,18 +183,41 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
klog.V(1).Infof("Total capacity to be moved: CPU:%v, Mem:%v, Pods:%v", totalCPU, totalMem, totalPods)
|
||||||
|
klog.V(1).Infof("********Number of pods evicted from each node:***********")
|
||||||
|
|
||||||
for _, node := range targetNodes {
|
for _, node := range targetNodes {
|
||||||
nodeCapacity := node.node.Status.Capacity
|
nodeCapacity := node.node.Status.Capacity
|
||||||
if len(node.node.Status.Allocatable) > 0 {
|
if len(node.node.Status.Allocatable) > 0 {
|
||||||
nodeCapacity = node.node.Status.Allocatable
|
nodeCapacity = node.node.Status.Allocatable
|
||||||
}
|
}
|
||||||
glog.V(1).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
|
klog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
|
||||||
// evict best effort pods
|
currentPodsEvicted := nodepodCount[node.node]
|
||||||
evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
|
|
||||||
// evict burstable pods
|
// Check if one pod has priority, if yes, assume that all pods have priority and evict pods based on priority.
|
||||||
evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
|
if node.allPods[0].Spec.Priority != nil {
|
||||||
// evict guaranteed pods
|
klog.V(1).Infof("All pods have priority associated with them. Evicting pods based on priority")
|
||||||
evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
|
evictablePods := make([]*v1.Pod, 0)
|
||||||
|
evictablePods = append(append(node.bPods, node.bePods...), node.gPods...)
|
||||||
|
|
||||||
|
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
|
||||||
|
sortPodsBasedOnPriority(evictablePods)
|
||||||
|
evictPods(evictablePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||||
|
} else {
|
||||||
|
// TODO: Remove this when we support only priority.
|
||||||
|
// Falling back to evicting pods based on priority.
|
||||||
|
klog.V(1).Infof("Evicting pods based on QoS")
|
||||||
|
klog.V(1).Infof("There are %v non-evictable pods on the node", len(node.nonRemovablePods))
|
||||||
|
// evict best effort pods
|
||||||
|
evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||||
|
// evict burstable pods
|
||||||
|
evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||||
|
// evict guaranteed pods
|
||||||
|
evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||||
|
}
|
||||||
|
nodepodCount[node.node] = currentPodsEvicted
|
||||||
|
podsEvicted = podsEvicted + nodepodCount[node.node]
|
||||||
|
klog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
|
||||||
}
|
}
|
||||||
return podsEvicted
|
return podsEvicted
|
||||||
}
|
}
|
||||||
@@ -178,36 +229,39 @@ func evictPods(inputPods []*v1.Pod,
|
|||||||
nodeCapacity v1.ResourceList,
|
nodeCapacity v1.ResourceList,
|
||||||
nodeUsage api.ResourceThresholds,
|
nodeUsage api.ResourceThresholds,
|
||||||
totalPods *float64,
|
totalPods *float64,
|
||||||
totalCpu *float64,
|
totalCPU *float64,
|
||||||
totalMem *float64,
|
totalMem *float64,
|
||||||
podsEvicted *int,
|
podsEvicted *int,
|
||||||
dryRun bool) {
|
dryRun bool, maxPodsToEvict int) {
|
||||||
if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCpu > 0 || *totalMem > 0) {
|
if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCPU > 0 || *totalMem > 0) {
|
||||||
onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
|
onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
|
||||||
for _, pod := range inputPods {
|
for _, pod := range inputPods {
|
||||||
cUsage := helper.GetResourceRequest(pod, v1.ResourceCPU)
|
if maxPodsToEvict > 0 && *podsEvicted+1 > maxPodsToEvict {
|
||||||
mUsage := helper.GetResourceRequest(pod, v1.ResourceMemory)
|
break
|
||||||
|
}
|
||||||
|
cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
|
||||||
|
mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)
|
||||||
success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
|
success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
|
||||||
if !success {
|
if !success {
|
||||||
glog.Infof("Error when evicting pod: %#v (%#v)", pod.Name, err)
|
klog.Warningf("Error when evicting pod: %#v (%#v)", pod.Name, err)
|
||||||
} else {
|
} else {
|
||||||
glog.V(1).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
|
klog.V(3).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
|
||||||
// update remaining pods
|
// update remaining pods
|
||||||
*podsEvicted++
|
*podsEvicted++
|
||||||
nodeUsage[v1.ResourcePods] -= onePodPercentage
|
nodeUsage[v1.ResourcePods] -= onePodPercentage
|
||||||
*totalPods--
|
*totalPods--
|
||||||
|
|
||||||
// update remaining cpu
|
// update remaining cpu
|
||||||
*totalCpu -= float64(cUsage)
|
*totalCPU -= float64(cUsage)
|
||||||
nodeUsage[v1.ResourceCPU] -= api.Percentage((float64(cUsage) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
|
nodeUsage[v1.ResourceCPU] -= api.Percentage((float64(cUsage) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
|
||||||
|
|
||||||
// update remaining memory
|
// update remaining memory
|
||||||
*totalMem -= float64(mUsage)
|
*totalMem -= float64(mUsage)
|
||||||
nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)
|
nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)
|
||||||
|
|
||||||
glog.V(1).Infof("updated node usage: %#v", nodeUsage)
|
klog.V(3).Infof("updated node usage: %#v", nodeUsage)
|
||||||
// check if node utilization drops below target threshold or required capacity (cpu, memory, pods) is moved
|
// check if node utilization drops below target threshold or required capacity (cpu, memory, pods) is moved
|
||||||
if !IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) || (*totalPods <= 0 && *totalCpu <= 0 && *totalMem <= 0) {
|
if !IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) || (*totalPods <= 0 && *totalCPU <= 0 && *totalMem <= 0) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -233,12 +287,35 @@ func SortNodesByUsage(nodes []NodeUsageMap) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func CreateNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap {
|
// sortPodsBasedOnPriority sorts pods based on priority and if their priorities are equal, they are sorted based on QoS tiers.
|
||||||
|
func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
|
||||||
|
sort.Slice(evictablePods, func(i, j int) bool {
|
||||||
|
if evictablePods[i].Spec.Priority == nil && evictablePods[j].Spec.Priority != nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if (evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority == nil) || (*evictablePods[i].Spec.Priority == *evictablePods[j].Spec.Priority) {
|
||||||
|
if podutil.IsBestEffortPod(evictablePods[i]) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if podutil.IsBurstablePod(evictablePods[i]) && podutil.IsGuaranteedPod(evictablePods[j]) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return *evictablePods[i].Spec.Priority < *evictablePods[j].Spec.Priority
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// createNodePodsMap returns nodepodsmap with evictable pods on node.
|
||||||
|
func createNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap {
|
||||||
npm := NodePodsMap{}
|
npm := NodePodsMap{}
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
pods, err := podutil.ListPodsOnANode(client, node)
|
pods, err := podutil.ListPodsOnANode(client, node)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Infof("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
|
klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
|
||||||
} else {
|
} else {
|
||||||
npm[node] = pods
|
npm[node] = pods
|
||||||
}
|
}
|
||||||
@@ -272,19 +349,16 @@ func IsNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
|
// NodeUtilization returns the current usage of node.
|
||||||
|
func NodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool) (api.ResourceThresholds, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
|
||||||
bePods := []*v1.Pod{}
|
bePods := []*v1.Pod{}
|
||||||
nonRemovablePods := []*v1.Pod{}
|
nonRemovablePods := []*v1.Pod{}
|
||||||
bPods := []*v1.Pod{}
|
bPods := []*v1.Pod{}
|
||||||
gPods := []*v1.Pod{}
|
gPods := []*v1.Pod{}
|
||||||
totalReqs := map[v1.ResourceName]resource.Quantity{}
|
totalReqs := map[v1.ResourceName]resource.Quantity{}
|
||||||
for _, pod := range pods {
|
for _, pod := range pods {
|
||||||
sr, err := podutil.CreatorRef(pod)
|
// We need to compute the usage of nonRemovablePods unless it is a best effort pod. So, cannot use podutil.ListEvictablePodsOnNode
|
||||||
if err != nil {
|
if !podutil.IsEvictable(pod, evictLocalStoragePods) {
|
||||||
sr = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if podutil.IsMirrorPod(pod) || podutil.IsPodWithLocalStorage(pod) || sr == nil || podutil.IsDaemonsetPod(sr) || podutil.IsCriticalPod(pod) {
|
|
||||||
nonRemovablePods = append(nonRemovablePods, pod)
|
nonRemovablePods = append(nonRemovablePods, pod)
|
||||||
if podutil.IsBestEffortPod(pod) {
|
if podutil.IsBestEffortPod(pod) {
|
||||||
continue
|
continue
|
||||||
@@ -298,15 +372,11 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*
|
|||||||
gPods = append(gPods, pod)
|
gPods = append(gPods, pod)
|
||||||
}
|
}
|
||||||
|
|
||||||
req, _, err := helper.PodRequestsAndLimits(pod)
|
req, _ := utils.PodRequestsAndLimits(pod)
|
||||||
if err != nil {
|
|
||||||
glog.Infof("Error computing resource usage of pod, ignoring: %#v", pod.Name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for name, quantity := range req {
|
for name, quantity := range req {
|
||||||
if name == v1.ResourceCPU || name == v1.ResourceMemory {
|
if name == v1.ResourceCPU || name == v1.ResourceMemory {
|
||||||
if value, ok := totalReqs[name]; !ok {
|
if value, ok := totalReqs[name]; !ok {
|
||||||
totalReqs[name] = *quantity.Copy()
|
totalReqs[name] = quantity.DeepCopy()
|
||||||
} else {
|
} else {
|
||||||
value.Add(quantity)
|
value.Add(quantity)
|
||||||
totalReqs[name] = value
|
totalReqs[name] = value
|
||||||
@@ -327,5 +397,5 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*
|
|||||||
usage[v1.ResourceCPU] = api.Percentage((float64(totalCPUReq.MilliValue()) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
|
usage[v1.ResourceCPU] = api.Percentage((float64(totalCPUReq.MilliValue()) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
|
||||||
usage[v1.ResourceMemory] = api.Percentage(float64(totalMemReq.Value()) / float64(nodeCapacity.Memory().Value()) * 100)
|
usage[v1.ResourceMemory] = api.Percentage(float64(totalMemReq.Value()) / float64(nodeCapacity.Memory().Value()) * 100)
|
||||||
usage[v1.ResourcePods] = api.Percentage((float64(totalPods) * 100) / float64(nodeCapacity.Pods().Value()))
|
usage[v1.ResourcePods] = api.Percentage((float64(totalPods) * 100) / float64(nodeCapacity.Pods().Value()))
|
||||||
return usage, nonRemovablePods, bePods, bPods, gPods
|
return usage, pods, nonRemovablePods, bePods, bPods, gPods
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,19 +18,23 @@ package strategies
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/kubernetes-incubator/descheduler/pkg/api"
|
|
||||||
"github.com/kubernetes-incubator/descheduler/test"
|
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
|
||||||
core "k8s.io/client-go/testing"
|
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
|
core "k8s.io/client-go/testing"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
"sigs.k8s.io/descheduler/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: Make this table driven.
|
// TODO: Make this table driven.
|
||||||
func TestLowNodeUtilization(t *testing.T) {
|
func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
|
||||||
var thresholds = make(api.ResourceThresholds)
|
var thresholds = make(api.ResourceThresholds)
|
||||||
var targetThresholds = make(api.ResourceThresholds)
|
var targetThresholds = make(api.ResourceThresholds)
|
||||||
thresholds[v1.ResourceCPU] = 30
|
thresholds[v1.ResourceCPU] = 30
|
||||||
@@ -40,6 +44,9 @@ func TestLowNodeUtilization(t *testing.T) {
|
|||||||
|
|
||||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
|
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
|
||||||
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
|
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
|
||||||
|
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
|
||||||
|
// Making n3 node unschedulable so that it won't counted in lowUtilized nodes list.
|
||||||
|
n3.Spec.Unschedulable = true
|
||||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||||
@@ -51,16 +58,16 @@ func TestLowNodeUtilization(t *testing.T) {
|
|||||||
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
|
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
|
||||||
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
|
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
|
||||||
|
|
||||||
p1.Annotations = test.GetReplicaSetAnnotation()
|
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
p2.Annotations = test.GetReplicaSetAnnotation()
|
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
p3.Annotations = test.GetReplicaSetAnnotation()
|
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
p4.Annotations = test.GetReplicaSetAnnotation()
|
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
p5.Annotations = test.GetReplicaSetAnnotation()
|
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
// The following 4 pods won't get evicted.
|
// The following 4 pods won't get evicted.
|
||||||
// A daemonset.
|
// A daemonset.
|
||||||
p6.Annotations = test.GetDaemonSetAnnotation()
|
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||||
// A pod with local storage.
|
// A pod with local storage.
|
||||||
p7.Annotations = test.GetNormalPodAnnotation()
|
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
p7.Spec.Volumes = []v1.Volume{
|
p7.Spec.Volumes = []v1.Volume{
|
||||||
{
|
{
|
||||||
Name: "sample",
|
Name: "sample",
|
||||||
@@ -75,9 +82,10 @@ func TestLowNodeUtilization(t *testing.T) {
|
|||||||
p7.Annotations = test.GetMirrorPodAnnotation()
|
p7.Annotations = test.GetMirrorPodAnnotation()
|
||||||
// A Critical Pod.
|
// A Critical Pod.
|
||||||
p8.Namespace = "kube-system"
|
p8.Namespace = "kube-system"
|
||||||
p8.Annotations = test.GetCriticalPodAnnotation()
|
priority := utils.SystemCriticalPriority
|
||||||
|
p8.Spec.Priority = &priority
|
||||||
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
|
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
|
||||||
p9.Annotations = test.GetReplicaSetAnnotation()
|
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
fakeClient := &fake.Clientset{}
|
fakeClient := &fake.Clientset{}
|
||||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
list := action.(core.ListAction)
|
list := action.(core.ListAction)
|
||||||
@@ -88,6 +96,9 @@ func TestLowNodeUtilization(t *testing.T) {
|
|||||||
if strings.Contains(fieldString, "n2") {
|
if strings.Contains(fieldString, "n2") {
|
||||||
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
|
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
|
||||||
}
|
}
|
||||||
|
if strings.Contains(fieldString, "n3") {
|
||||||
|
return true, &v1.PodList{Items: []v1.Pod{}}, nil
|
||||||
|
}
|
||||||
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||||
})
|
})
|
||||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
@@ -97,15 +108,222 @@ func TestLowNodeUtilization(t *testing.T) {
|
|||||||
return true, n1, nil
|
return true, n1, nil
|
||||||
case n2.Name:
|
case n2.Name:
|
||||||
return true, n2, nil
|
return true, n2, nil
|
||||||
|
case n3.Name:
|
||||||
|
return true, n3, nil
|
||||||
}
|
}
|
||||||
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
||||||
})
|
})
|
||||||
expectedPodsEvicted := 4
|
expectedPodsEvicted := 3
|
||||||
npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2})
|
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
|
||||||
lowNodes, targetNodes, _ := classifyNodes(npm, thresholds, targetThresholds)
|
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
|
||||||
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false)
|
if len(lowNodes) != 1 {
|
||||||
|
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
|
||||||
|
}
|
||||||
|
npe := utils.NodePodEvictedCount{}
|
||||||
|
npe[n1] = 0
|
||||||
|
npe[n2] = 0
|
||||||
|
npe[n3] = 0
|
||||||
|
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
|
||||||
if expectedPodsEvicted != podsEvicted {
|
if expectedPodsEvicted != podsEvicted {
|
||||||
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted)
|
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: Make this table driven.
|
||||||
|
func TestLowNodeUtilizationWithPriorities(t *testing.T) {
|
||||||
|
var thresholds = make(api.ResourceThresholds)
|
||||||
|
var targetThresholds = make(api.ResourceThresholds)
|
||||||
|
thresholds[v1.ResourceCPU] = 30
|
||||||
|
thresholds[v1.ResourcePods] = 30
|
||||||
|
targetThresholds[v1.ResourceCPU] = 50
|
||||||
|
targetThresholds[v1.ResourcePods] = 50
|
||||||
|
lowPriority := int32(0)
|
||||||
|
highPriority := int32(10000)
|
||||||
|
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
|
||||||
|
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
|
||||||
|
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
|
||||||
|
// Making n3 node unschedulable so that it won't counted in lowUtilized nodes list.
|
||||||
|
n3.Spec.Unschedulable = true
|
||||||
|
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||||
|
p1.Spec.Priority = &highPriority
|
||||||
|
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||||
|
p2.Spec.Priority = &highPriority
|
||||||
|
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||||
|
p3.Spec.Priority = &highPriority
|
||||||
|
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
|
||||||
|
p4.Spec.Priority = &highPriority
|
||||||
|
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
|
||||||
|
p5.Spec.Priority = &lowPriority
|
||||||
|
|
||||||
|
// These won't be evicted.
|
||||||
|
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
|
||||||
|
p6.Spec.Priority = &highPriority
|
||||||
|
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
|
||||||
|
p7.Spec.Priority = &lowPriority
|
||||||
|
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
|
||||||
|
p8.Spec.Priority = &lowPriority
|
||||||
|
|
||||||
|
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
|
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
|
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
|
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
|
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
|
// The following 4 pods won't get evicted.
|
||||||
|
// A daemonset.
|
||||||
|
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||||
|
// A pod with local storage.
|
||||||
|
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
p7.Spec.Volumes = []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "sample",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||||
|
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||||
|
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// A Mirror Pod.
|
||||||
|
p7.Annotations = test.GetMirrorPodAnnotation()
|
||||||
|
// A Critical Pod.
|
||||||
|
p8.Namespace = "kube-system"
|
||||||
|
priority := utils.SystemCriticalPriority
|
||||||
|
p8.Spec.Priority = &priority
|
||||||
|
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
|
||||||
|
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
|
fakeClient := &fake.Clientset{}
|
||||||
|
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
|
list := action.(core.ListAction)
|
||||||
|
fieldString := list.GetListRestrictions().Fields.String()
|
||||||
|
if strings.Contains(fieldString, "n1") {
|
||||||
|
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
|
||||||
|
}
|
||||||
|
if strings.Contains(fieldString, "n2") {
|
||||||
|
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
|
||||||
|
}
|
||||||
|
if strings.Contains(fieldString, "n3") {
|
||||||
|
return true, &v1.PodList{Items: []v1.Pod{}}, nil
|
||||||
|
}
|
||||||
|
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||||
|
})
|
||||||
|
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
|
getAction := action.(core.GetAction)
|
||||||
|
switch getAction.GetName() {
|
||||||
|
case n1.Name:
|
||||||
|
return true, n1, nil
|
||||||
|
case n2.Name:
|
||||||
|
return true, n2, nil
|
||||||
|
case n3.Name:
|
||||||
|
return true, n3, nil
|
||||||
|
}
|
||||||
|
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
||||||
|
})
|
||||||
|
expectedPodsEvicted := 3
|
||||||
|
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
|
||||||
|
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
|
||||||
|
if len(lowNodes) != 1 {
|
||||||
|
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
|
||||||
|
}
|
||||||
|
npe := utils.NodePodEvictedCount{}
|
||||||
|
npe[n1] = 0
|
||||||
|
npe[n2] = 0
|
||||||
|
npe[n3] = 0
|
||||||
|
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
|
||||||
|
if expectedPodsEvicted != podsEvicted {
|
||||||
|
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSortPodsByPriority(t *testing.T) {
|
||||||
|
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
|
||||||
|
lowPriority := int32(0)
|
||||||
|
highPriority := int32(10000)
|
||||||
|
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||||
|
p1.Spec.Priority = &lowPriority
|
||||||
|
|
||||||
|
// BestEffort
|
||||||
|
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||||
|
p2.Spec.Priority = &highPriority
|
||||||
|
|
||||||
|
p2.Spec.Containers[0].Resources.Requests = nil
|
||||||
|
p2.Spec.Containers[0].Resources.Limits = nil
|
||||||
|
|
||||||
|
// Burstable
|
||||||
|
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||||
|
p3.Spec.Priority = &highPriority
|
||||||
|
|
||||||
|
// Guaranteed
|
||||||
|
p4 := test.BuildTestPod("p4", 400, 100, n1.Name)
|
||||||
|
p4.Spec.Priority = &highPriority
|
||||||
|
p4.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
|
||||||
|
p4.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
|
||||||
|
|
||||||
|
// Best effort with nil priorities.
|
||||||
|
p5 := test.BuildTestPod("p5", 400, 100, n1.Name)
|
||||||
|
p5.Spec.Priority = nil
|
||||||
|
p6 := test.BuildTestPod("p6", 400, 100, n1.Name)
|
||||||
|
p6.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
|
||||||
|
p6.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
|
||||||
|
p6.Spec.Priority = nil
|
||||||
|
|
||||||
|
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
|
||||||
|
|
||||||
|
sortPodsBasedOnPriority(podList)
|
||||||
|
if !reflect.DeepEqual(podList[len(podList)-1], p4) {
|
||||||
|
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidateThresholds(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input api.ResourceThresholds
|
||||||
|
succeed bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "passing nil map for threshold",
|
||||||
|
input: nil,
|
||||||
|
succeed: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "passing no threshold",
|
||||||
|
input: api.ResourceThresholds{},
|
||||||
|
succeed: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "passing unsupported resource name",
|
||||||
|
input: api.ResourceThresholds{
|
||||||
|
v1.ResourceCPU: 40,
|
||||||
|
v1.ResourceStorage: 25.5,
|
||||||
|
},
|
||||||
|
succeed: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "passing invalid resource name",
|
||||||
|
input: api.ResourceThresholds{
|
||||||
|
v1.ResourceCPU: 40,
|
||||||
|
"coolResource": 42.0,
|
||||||
|
},
|
||||||
|
succeed: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "passing a valid threshold with cpu, memory and pods",
|
||||||
|
input: api.ResourceThresholds{
|
||||||
|
v1.ResourceCPU: 20,
|
||||||
|
v1.ResourceMemory: 30,
|
||||||
|
v1.ResourcePods: 40,
|
||||||
|
},
|
||||||
|
succeed: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
isValid := validateThresholds(test.input)
|
||||||
|
|
||||||
|
if isValid != test.succeed {
|
||||||
|
t.Errorf("expected validity of threshold: %#v\nto be %v but got %v instead", test.input, test.succeed, isValid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
76
pkg/descheduler/strategies/node_affinity.go
Normal file
76
pkg/descheduler/strategies/node_affinity.go
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package strategies
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
"k8s.io/klog"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
|
||||||
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||||
|
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||||
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
|
)
|
||||||
|
|
||||||
|
func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
|
||||||
|
removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
|
||||||
|
}
|
||||||
|
|
||||||
|
func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
|
||||||
|
evictedPodCount := 0
|
||||||
|
if !strategy.Enabled {
|
||||||
|
return evictedPodCount
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
|
||||||
|
klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)
|
||||||
|
|
||||||
|
switch nodeAffinity {
|
||||||
|
case "requiredDuringSchedulingIgnoredDuringExecution":
|
||||||
|
for _, node := range nodes {
|
||||||
|
klog.V(1).Infof("Processing node: %#v\n", node.Name)
|
||||||
|
|
||||||
|
pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
|
||||||
|
if err != nil {
|
||||||
|
klog.Errorf("failed to get pods from %v: %v", node.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, pod := range pods {
|
||||||
|
if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||||
|
|
||||||
|
if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
|
||||||
|
klog.V(1).Infof("Evicting pod: %v", pod.Name)
|
||||||
|
evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, ds.DryRun)
|
||||||
|
nodepodCount[node]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
evictedPodCount += nodepodCount[node]
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
klog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
|
||||||
|
return evictedPodCount
|
||||||
|
}
|
||||||
|
}
|
||||||
|
klog.V(1).Infof("Evicted %v pods", evictedPodCount)
|
||||||
|
return evictedPodCount
|
||||||
|
}
|
||||||
185
pkg/descheduler/strategies/node_affinity_test.go
Normal file
185
pkg/descheduler/strategies/node_affinity_test.go
Normal file
@@ -0,0 +1,185 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package strategies
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
|
core "k8s.io/client-go/testing"
|
||||||
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
"sigs.k8s.io/descheduler/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||||
|
|
||||||
|
requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: api.StrategyParameters{
|
||||||
|
NodeAffinityType: []string{
|
||||||
|
"requiredDuringSchedulingIgnoredDuringExecution",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||||
|
nodeLabelValue := "yes"
|
||||||
|
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10)
|
||||||
|
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
|
||||||
|
|
||||||
|
nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10)
|
||||||
|
|
||||||
|
unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10)
|
||||||
|
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
|
||||||
|
unschedulableNodeWithLabels.Spec.Unschedulable = true
|
||||||
|
|
||||||
|
addPodsToNode := func(node *v1.Node) []v1.Pod {
|
||||||
|
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name)
|
||||||
|
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
|
||||||
|
NodeAffinity: &v1.NodeAffinity{
|
||||||
|
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||||
|
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||||
|
{
|
||||||
|
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: nodeLabelKey,
|
||||||
|
Operator: "In",
|
||||||
|
Values: []string{
|
||||||
|
nodeLabelValue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
pod1 := test.BuildTestPod("pod1", 100, 0, node.Name)
|
||||||
|
pod2 := test.BuildTestPod("pod2", 100, 0, node.Name)
|
||||||
|
|
||||||
|
podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
|
||||||
|
return []v1.Pod{
|
||||||
|
*podWithNodeAffinity,
|
||||||
|
*pod1,
|
||||||
|
*pod2,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
description string
|
||||||
|
nodes []*v1.Node
|
||||||
|
pods []v1.Pod
|
||||||
|
strategy api.DeschedulerStrategy
|
||||||
|
expectedEvictedPodCount int
|
||||||
|
npe utils.NodePodEvictedCount
|
||||||
|
maxPodsToEvict int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
description: "Strategy disabled, should not evict any pods",
|
||||||
|
strategy: api.DeschedulerStrategy{
|
||||||
|
Enabled: false,
|
||||||
|
Params: api.StrategyParameters{
|
||||||
|
NodeAffinityType: []string{
|
||||||
|
"requiredDuringSchedulingIgnoredDuringExecution",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedEvictedPodCount: 0,
|
||||||
|
pods: addPodsToNode(nodeWithoutLabels),
|
||||||
|
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||||
|
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
|
||||||
|
maxPodsToEvict: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Invalid strategy type, should not evict any pods",
|
||||||
|
strategy: api.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: api.StrategyParameters{
|
||||||
|
NodeAffinityType: []string{
|
||||||
|
"requiredDuringSchedulingRequiredDuringExecution",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedEvictedPodCount: 0,
|
||||||
|
pods: addPodsToNode(nodeWithoutLabels),
|
||||||
|
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||||
|
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
|
||||||
|
maxPodsToEvict: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Pod is correctly scheduled on node, no eviction expected",
|
||||||
|
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
|
||||||
|
expectedEvictedPodCount: 0,
|
||||||
|
pods: addPodsToNode(nodeWithLabels),
|
||||||
|
nodes: []*v1.Node{nodeWithLabels},
|
||||||
|
npe: utils.NodePodEvictedCount{nodeWithLabels: 0},
|
||||||
|
maxPodsToEvict: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
|
||||||
|
expectedEvictedPodCount: 1,
|
||||||
|
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
|
||||||
|
pods: addPodsToNode(nodeWithoutLabels),
|
||||||
|
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||||
|
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
|
||||||
|
maxPodsToEvict: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 1, should not be evicted",
|
||||||
|
expectedEvictedPodCount: 1,
|
||||||
|
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
|
||||||
|
pods: addPodsToNode(nodeWithoutLabels),
|
||||||
|
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||||
|
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
|
||||||
|
maxPodsToEvict: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
|
||||||
|
expectedEvictedPodCount: 0,
|
||||||
|
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
|
||||||
|
pods: addPodsToNode(nodeWithoutLabels),
|
||||||
|
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
|
||||||
|
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
|
||||||
|
maxPodsToEvict: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
|
||||||
|
fakeClient := &fake.Clientset{}
|
||||||
|
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
|
return true, &v1.PodList{Items: tc.pods}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
ds := options.DeschedulerServer{
|
||||||
|
Client: fakeClient,
|
||||||
|
}
|
||||||
|
|
||||||
|
actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict, false)
|
||||||
|
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||||
|
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
139
pkg/descheduler/strategies/node_taint.go
Normal file
139
pkg/descheduler/strategies/node_taint.go
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package strategies
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||||
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/klog"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
TolerationOpExists v1.TolerationOperator = "Exists"
|
||||||
|
TolerationOpEqual v1.TolerationOperator = "Equal"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RemovePodsViolatingNodeTaints with elimination strategy
|
||||||
|
func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
|
||||||
|
if !strategy.Enabled {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
deletePodsViolatingNodeTaints(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
|
||||||
|
}
|
||||||
|
|
||||||
|
// deletePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
|
||||||
|
func deletePodsViolatingNodeTaints(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
|
||||||
|
podsEvicted := 0
|
||||||
|
for _, node := range nodes {
|
||||||
|
klog.V(1).Infof("Processing node: %#v\n", node.Name)
|
||||||
|
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
|
||||||
|
if err != nil {
|
||||||
|
//no pods evicted as error encountered retrieving evictable Pods
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
totalPods := len(pods)
|
||||||
|
for i := 0; i < totalPods; i++ {
|
||||||
|
if maxPodsToEvict > 0 && nodePodCount[node]+1 > maxPodsToEvict {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if !checkPodsSatisfyTolerations(pods[i], node) {
|
||||||
|
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
|
||||||
|
if !success {
|
||||||
|
klog.Errorf("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
|
||||||
|
} else {
|
||||||
|
nodePodCount[node]++
|
||||||
|
klog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
podsEvicted += nodePodCount[node]
|
||||||
|
}
|
||||||
|
return podsEvicted
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkPodsSatisfyTolerations checks if the node's taints (NoSchedule) are still satisfied by pods' tolerations.
|
||||||
|
func checkPodsSatisfyTolerations(pod *v1.Pod, node *v1.Node) bool {
|
||||||
|
tolerations := pod.Spec.Tolerations
|
||||||
|
taints := node.Spec.Taints
|
||||||
|
if len(taints) == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
noScheduleTaints := getNoScheduleTaints(taints)
|
||||||
|
if !allTaintsTolerated(noScheduleTaints, tolerations) {
|
||||||
|
klog.V(2).Infof("Not all taints are tolerated after update for Pod %v on node %v", pod.Name, node.Name)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// getNoScheduleTaints return a slice of NoSchedule taints from the a slice of taints that it receives.
|
||||||
|
func getNoScheduleTaints(taints []v1.Taint) []v1.Taint {
|
||||||
|
result := []v1.Taint{}
|
||||||
|
for i := range taints {
|
||||||
|
if taints[i].Effect == v1.TaintEffectNoSchedule {
|
||||||
|
result = append(result, taints[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
//toleratesTaint returns true if a toleration tolerates a taint, or false otherwise
|
||||||
|
func toleratesTaint(toleration *v1.Toleration, taint *v1.Taint) bool {
|
||||||
|
|
||||||
|
if (len(toleration.Key) > 0 && toleration.Key != taint.Key) ||
|
||||||
|
(len(toleration.Effect) > 0 && toleration.Effect != taint.Effect) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
switch toleration.Operator {
|
||||||
|
// empty operator means Equal
|
||||||
|
case "", TolerationOpEqual:
|
||||||
|
return toleration.Value == taint.Value
|
||||||
|
case TolerationOpExists:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// allTaintsTolerated returns true if all are tolerated, or false otherwise.
|
||||||
|
func allTaintsTolerated(taints []v1.Taint, tolerations []v1.Toleration) bool {
|
||||||
|
if len(taints) == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if len(tolerations) == 0 && len(taints) > 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i := range taints {
|
||||||
|
tolerated := false
|
||||||
|
for j := range tolerations {
|
||||||
|
if toleratesTaint(&tolerations[j], &taints[i]) {
|
||||||
|
tolerated = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !tolerated {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
298
pkg/descheduler/strategies/node_taint_test.go
Normal file
298
pkg/descheduler/strategies/node_taint_test.go
Normal file
@@ -0,0 +1,298 @@
|
|||||||
|
package strategies
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
|
core "k8s.io/client-go/testing"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
"sigs.k8s.io/descheduler/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func createNoScheduleTaint(key, value string, index int) v1.Taint {
|
||||||
|
return v1.Taint{
|
||||||
|
Key: "testTaint" + fmt.Sprintf("%v", index),
|
||||||
|
Value: "test" + fmt.Sprintf("%v", index),
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func addTaintsToNode(node *v1.Node, key, value string, indices []int) *v1.Node {
|
||||||
|
taints := []v1.Taint{}
|
||||||
|
for _, index := range indices {
|
||||||
|
taints = append(taints, createNoScheduleTaint(key, value, index))
|
||||||
|
}
|
||||||
|
node.Spec.Taints = taints
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
|
||||||
|
if pod.Annotations == nil {
|
||||||
|
pod.Annotations = map[string]string{}
|
||||||
|
}
|
||||||
|
|
||||||
|
pod.Spec.Tolerations = []v1.Toleration{{Key: key + fmt.Sprintf("%v", index), Value: value + fmt.Sprintf("%v", index), Effect: v1.TaintEffectNoSchedule}}
|
||||||
|
|
||||||
|
return pod
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||||
|
|
||||||
|
node1 := test.BuildTestNode("n1", 2000, 3000, 10)
|
||||||
|
node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
|
||||||
|
node2 := test.BuildTestNode("n2", 2000, 3000, 10)
|
||||||
|
node1 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
|
||||||
|
|
||||||
|
p1 := test.BuildTestPod("p1", 100, 0, node1.Name)
|
||||||
|
p2 := test.BuildTestPod("p2", 100, 0, node1.Name)
|
||||||
|
p3 := test.BuildTestPod("p3", 100, 0, node1.Name)
|
||||||
|
p4 := test.BuildTestPod("p4", 100, 0, node1.Name)
|
||||||
|
p5 := test.BuildTestPod("p5", 100, 0, node1.Name)
|
||||||
|
p6 := test.BuildTestPod("p6", 100, 0, node1.Name)
|
||||||
|
|
||||||
|
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
p7 := test.BuildTestPod("p7", 100, 0, node2.Name)
|
||||||
|
p8 := test.BuildTestPod("p8", 100, 0, node2.Name)
|
||||||
|
p9 := test.BuildTestPod("p9", 100, 0, node2.Name)
|
||||||
|
p10 := test.BuildTestPod("p10", 100, 0, node2.Name)
|
||||||
|
p11 := test.BuildTestPod("p11", 100, 0, node2.Name)
|
||||||
|
p11.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
|
||||||
|
// The following 4 pods won't get evicted.
|
||||||
|
// A Critical Pod.
|
||||||
|
p7.Namespace = "kube-system"
|
||||||
|
priority := utils.SystemCriticalPriority
|
||||||
|
p7.Spec.Priority = &priority
|
||||||
|
|
||||||
|
// A daemonset.
|
||||||
|
p8.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||||
|
// A pod with local storage.
|
||||||
|
p9.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
p9.Spec.Volumes = []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "sample",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||||
|
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||||
|
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// A Mirror Pod.
|
||||||
|
p10.Annotations = test.GetMirrorPodAnnotation()
|
||||||
|
|
||||||
|
p1 = addTolerationToPod(p1, "testTaint", "test", 1)
|
||||||
|
p3 = addTolerationToPod(p3, "testTaint", "test", 1)
|
||||||
|
p4 = addTolerationToPod(p4, "testTaintX", "testX", 1)
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
description string
|
||||||
|
nodes []*v1.Node
|
||||||
|
pods []v1.Pod
|
||||||
|
evictLocalStoragePods bool
|
||||||
|
npe utils.NodePodEvictedCount
|
||||||
|
maxPodsToEvict int
|
||||||
|
expectedEvictedPodCount int
|
||||||
|
}{
|
||||||
|
|
||||||
|
{
|
||||||
|
description: "Pods not tolerating node taint should be evicted",
|
||||||
|
pods: []v1.Pod{*p1, *p2, *p3},
|
||||||
|
nodes: []*v1.Node{node1},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
npe: utils.NodePodEvictedCount{node1: 0},
|
||||||
|
maxPodsToEvict: 0,
|
||||||
|
expectedEvictedPodCount: 1, //p2 gets evicted
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Pods with tolerations but not tolerating node taint should be evicted",
|
||||||
|
pods: []v1.Pod{*p1, *p3, *p4},
|
||||||
|
nodes: []*v1.Node{node1},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
npe: utils.NodePodEvictedCount{node1: 0},
|
||||||
|
maxPodsToEvict: 0,
|
||||||
|
expectedEvictedPodCount: 1, //p4 gets evicted
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Only <maxPodsToEvict> number of Pods not tolerating node taint should be evicted",
|
||||||
|
pods: []v1.Pod{*p1, *p5, *p6},
|
||||||
|
nodes: []*v1.Node{node1},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
npe: utils.NodePodEvictedCount{node1: 0},
|
||||||
|
maxPodsToEvict: 1,
|
||||||
|
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Critical pods not tolerating node taint should not be evicted",
|
||||||
|
pods: []v1.Pod{*p7, *p8, *p9, *p10},
|
||||||
|
nodes: []*v1.Node{node2},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
npe: utils.NodePodEvictedCount{node2: 0},
|
||||||
|
maxPodsToEvict: 0,
|
||||||
|
expectedEvictedPodCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Critical pods except storage pods not tolerating node taint should not be evicted",
|
||||||
|
pods: []v1.Pod{*p7, *p8, *p9, *p10},
|
||||||
|
nodes: []*v1.Node{node2},
|
||||||
|
evictLocalStoragePods: true,
|
||||||
|
npe: utils.NodePodEvictedCount{node2: 0},
|
||||||
|
maxPodsToEvict: 0,
|
||||||
|
expectedEvictedPodCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
|
||||||
|
pods: []v1.Pod{*p7, *p8, *p10, *p11},
|
||||||
|
nodes: []*v1.Node{node2},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
npe: utils.NodePodEvictedCount{node2: 0},
|
||||||
|
maxPodsToEvict: 0,
|
||||||
|
expectedEvictedPodCount: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
|
||||||
|
// create fake client
|
||||||
|
fakeClient := &fake.Clientset{}
|
||||||
|
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
|
return true, &v1.PodList{Items: tc.pods}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
actualEvictedPodCount := deletePodsViolatingNodeTaints(fakeClient, "v1", tc.nodes, false, tc.npe, tc.maxPodsToEvict, tc.evictLocalStoragePods)
|
||||||
|
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||||
|
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestToleratesTaint(t *testing.T) {
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
description string
|
||||||
|
toleration v1.Toleration
|
||||||
|
taint v1.Taint
|
||||||
|
expectTolerated bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
description: "toleration and taint have the same key and effect, and operator is Exists, and taint has no value, expect tolerated",
|
||||||
|
toleration: v1.Toleration{
|
||||||
|
Key: "foo",
|
||||||
|
Operator: TolerationOpExists,
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
taint: v1.Taint{
|
||||||
|
Key: "foo",
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
expectTolerated: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "toleration and taint have the same key and effect, and operator is Exists, and taint has some value, expect tolerated",
|
||||||
|
toleration: v1.Toleration{
|
||||||
|
Key: "foo",
|
||||||
|
Operator: TolerationOpExists,
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
taint: v1.Taint{
|
||||||
|
Key: "foo",
|
||||||
|
Value: "bar",
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
expectTolerated: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "toleration and taint have the same effect, toleration has empty key and operator is Exists, means match all taints, expect tolerated",
|
||||||
|
toleration: v1.Toleration{
|
||||||
|
Key: "",
|
||||||
|
Operator: TolerationOpExists,
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
taint: v1.Taint{
|
||||||
|
Key: "foo",
|
||||||
|
Value: "bar",
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
expectTolerated: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "toleration and taint have the same key, effect and value, and operator is Equal, expect tolerated",
|
||||||
|
toleration: v1.Toleration{
|
||||||
|
Key: "foo",
|
||||||
|
Operator: TolerationOpEqual,
|
||||||
|
Value: "bar",
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
taint: v1.Taint{
|
||||||
|
Key: "foo",
|
||||||
|
Value: "bar",
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
expectTolerated: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "toleration and taint have the same key and effect, but different values, and operator is Equal, expect not tolerated",
|
||||||
|
toleration: v1.Toleration{
|
||||||
|
Key: "foo",
|
||||||
|
Operator: TolerationOpEqual,
|
||||||
|
Value: "value1",
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
taint: v1.Taint{
|
||||||
|
Key: "foo",
|
||||||
|
Value: "value2",
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
expectTolerated: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "toleration and taint have the same key and value, but different effects, and operator is Equal, expect not tolerated",
|
||||||
|
toleration: v1.Toleration{
|
||||||
|
Key: "foo",
|
||||||
|
Operator: TolerationOpEqual,
|
||||||
|
Value: "bar",
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
taint: v1.Taint{
|
||||||
|
Key: "foo",
|
||||||
|
Value: "bar",
|
||||||
|
Effect: v1.TaintEffectNoExecute,
|
||||||
|
},
|
||||||
|
expectTolerated: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
if tolerated := toleratesTaint(&tc.toleration, &tc.taint); tc.expectTolerated != tolerated {
|
||||||
|
t.Errorf("[%s] expect %v, got %v: toleration %+v, taint %s", tc.description, tc.expectTolerated, tolerated, tc.toleration, tc.taint.ToString())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilterNoExecuteTaints(t *testing.T) {
|
||||||
|
taints := []v1.Taint{
|
||||||
|
{
|
||||||
|
Key: "one",
|
||||||
|
Value: "one",
|
||||||
|
Effect: v1.TaintEffectNoExecute,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "two",
|
||||||
|
Value: "two",
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
taints = getNoScheduleTaints(taints)
|
||||||
|
if len(taints) != 1 || taints[0].Key != "two" {
|
||||||
|
t.Errorf("Filtering doesn't work. Got %v", taints)
|
||||||
|
}
|
||||||
|
}
|
||||||
104
pkg/descheduler/strategies/pod_antiaffinity.go
Normal file
104
pkg/descheduler/strategies/pod_antiaffinity.go
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package strategies
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||||
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/klog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RemovePodsViolatingInterPodAntiAffinity with elimination strategy
|
||||||
|
func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
|
||||||
|
if !strategy.Enabled {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
|
||||||
|
}
|
||||||
|
|
||||||
|
// removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules.
// For each node it lists the evictable pods, and evicts any pod whose required
// anti-affinity terms are violated by another pod on the same node, honoring
// the per-node eviction budget (maxPodsToEvict, 0 = unlimited). It returns the
// total number of evictions recorded in nodePodCount across all nodes.
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
	podsEvicted := 0
	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %#v\n", node.Name)
		pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
		if err != nil {
			// NOTE(review): a list error on any node aborts the whole run and
			// reports 0, discarding evictions already counted on earlier
			// nodes — confirm this is intended rather than `continue`.
			return 0
		}
		totalPods := len(pods)
		for i := 0; i < totalPods; i++ {
			// Stop evicting on this node once the per-node budget is reached.
			if maxPodsToEvict > 0 && nodePodCount[node]+1 > maxPodsToEvict {
				break
			}
			if checkPodsWithAntiAffinityExist(pods[i], pods) {
				success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
				if !success {
					klog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
				} else {
					nodePodCount[node]++
					klog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err)
					// Since the current pod is evicted all other pods which have anti-affinity with this
					// pod need not be evicted.
					// Update pods.
					// Remove the evicted pod from the working slice and step
					// back so the element shifted into index i is re-examined.
					pods = append(pods[:i], pods[i+1:]...)
					i--
					totalPods--
				}
			}
		}
		podsEvicted += nodePodCount[node]
	}
	return podsEvicted
}
|
||||||
|
|
||||||
|
// checkPodsWithAntiAffinityExist checks if there are other pods on the node that the current pod cannot tolerate.
|
||||||
|
func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
|
||||||
|
affinity := pod.Spec.Affinity
|
||||||
|
if affinity != nil && affinity.PodAntiAffinity != nil {
|
||||||
|
for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) {
|
||||||
|
namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, &term)
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
|
||||||
|
if err != nil {
|
||||||
|
klog.Infof("%v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, existingPod := range pods {
|
||||||
|
if existingPod.Name != pod.Name && utils.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// getPodAntiAffinityTerms gets the antiaffinity terms for the given pod.
|
||||||
|
func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
|
||||||
|
if podAntiAffinity != nil {
|
||||||
|
if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
|
||||||
|
terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return terms
|
||||||
|
}
|
||||||
90
pkg/descheduler/strategies/pod_antiaffinity_test.go
Normal file
90
pkg/descheduler/strategies/pod_antiaffinity_test.go
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package strategies
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
|
core "k8s.io/client-go/testing"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
"sigs.k8s.io/descheduler/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestPodAntiAffinity verifies removePodsWithAffinityRules against a fake
// client: p1, p3 and p4 carry an anti-affinity term matching foo=bar, and p2
// carries that label, so p1/p3/p4 all violate anti-affinity on the node.
func TestPodAntiAffinity(t *testing.T) {
	node := test.BuildTestNode("n1", 2000, 3000, 10)
	p1 := test.BuildTestPod("p1", 100, 0, node.Name)
	p2 := test.BuildTestPod("p2", 100, 0, node.Name)
	p3 := test.BuildTestPod("p3", 100, 0, node.Name)
	p4 := test.BuildTestPod("p4", 100, 0, node.Name)
	// p2 is the pod the anti-affinity selectors of the others match.
	p2.Labels = map[string]string{"foo": "bar"}
	p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
	p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
	p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
	p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

	// set pod anti affinity
	setPodAntiAffinity(p1)
	setPodAntiAffinity(p3)
	setPodAntiAffinity(p4)

	// create fake client
	fakeClient := &fake.Clientset{}
	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil
	})
	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		return true, node, nil
	})
	npe := utils.NodePodEvictedCount{}
	npe[node] = 0
	// With no eviction cap, all three violating pods are evicted.
	expectedEvictedPodCount := 3
	podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0, false)
	if podsEvicted != expectedEvictedPodCount {
		t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
	}
	npe[node] = 0
	// With a per-node cap of 1, only one pod may be evicted.
	expectedEvictedPodCount = 1
	podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1, false)
	if podsEvicted != expectedEvictedPodCount {
		t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
	}
}
|
||||||
|
|
||||||
|
func setPodAntiAffinity(inputPod *v1.Pod) {
|
||||||
|
inputPod.Spec.Affinity = &v1.Affinity{
|
||||||
|
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||||
|
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||||
|
{
|
||||||
|
LabelSelector: &metav1.LabelSelector{
|
||||||
|
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: "foo",
|
||||||
|
Operator: metav1.LabelSelectorOpIn,
|
||||||
|
Values: []string{"bar"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
TopologyKey: "region",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
181
pkg/utils/pod.go
Normal file
181
pkg/utils/pod.go
Normal file
@@ -0,0 +1,181 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||||
|
"k8s.io/component-base/featuregate"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// owner: @jinxu
|
||||||
|
// beta: v1.10
|
||||||
|
//
|
||||||
|
// New local storage types to support local storage capacity isolation
|
||||||
|
LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
|
||||||
|
|
||||||
|
// owner: @egernst
|
||||||
|
// alpha: v1.16
|
||||||
|
//
|
||||||
|
// Enables PodOverhead, for accounting pod overheads which are specific to a given RuntimeClass
|
||||||
|
PodOverhead featuregate.Feature = "PodOverhead"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetResourceRequest finds and returns the request value for a specific resource.
|
||||||
|
func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
|
||||||
|
if resource == v1.ResourcePods {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
requestQuantity := GetResourceRequestQuantity(pod, resource)
|
||||||
|
|
||||||
|
if resource == v1.ResourceCPU {
|
||||||
|
return requestQuantity.MilliValue()
|
||||||
|
}
|
||||||
|
|
||||||
|
return requestQuantity.Value()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
// Requests of regular containers are summed; each init container then raises
// the result to its own request if larger (init containers run sequentially,
// so only the maximum matters). Pod overhead is added when the PodOverhead
// feature gate is enabled and the total is non-zero.
func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
	requestQuantity := resource.Quantity{}

	// Pick the conventional serialization format for the resource kind.
	switch resourceName {
	case v1.ResourceCPU:
		requestQuantity = resource.Quantity{Format: resource.DecimalSI}
	case v1.ResourceMemory, v1.ResourceStorage, v1.ResourceEphemeralStorage:
		requestQuantity = resource.Quantity{Format: resource.BinarySI}
	default:
		requestQuantity = resource.Quantity{Format: resource.DecimalSI}
	}

	if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(LocalStorageCapacityIsolation) {
		// if the local storage capacity isolation feature gate is disabled, pods request 0 disk
		return requestQuantity
	}

	for _, container := range pod.Spec.Containers {
		if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
			requestQuantity.Add(rQuantity)
		}
	}

	for _, container := range pod.Spec.InitContainers {
		if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
			if requestQuantity.Cmp(rQuantity) < 0 {
				requestQuantity = rQuantity.DeepCopy()
			}
		}
	}

	// if PodOverhead feature is supported, add overhead for running a pod
	// to the total requests if the resource total is non-zero
	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(PodOverhead) {
		if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
			requestQuantity.Add(podOverhead)
		}
	}

	return requestQuantity
}
|
||||||
|
|
||||||
|
// IsMirrorPod returns true if the passed Pod is a Mirror Pod.
|
||||||
|
func IsMirrorPod(pod *v1.Pod) bool {
|
||||||
|
_, ok := pod.Annotations[v1.MirrorPodAnnotationKey]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsStaticPod returns true if the pod is a static pod.
|
||||||
|
func IsStaticPod(pod *v1.Pod) bool {
|
||||||
|
source, err := GetPodSource(pod)
|
||||||
|
return err == nil && source != "api"
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPodSource returns the source of the pod based on the annotation.
|
||||||
|
func GetPodSource(pod *v1.Pod) (string, error) {
|
||||||
|
if pod.Annotations != nil {
|
||||||
|
if source, ok := pod.Annotations["kubernetes.io/config.source"]; ok {
|
||||||
|
return source, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", fmt.Errorf("cannot get source of pod %q", pod.UID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCriticalPod returns true if pod's priority is greater than or equal to SystemCriticalPriority.
|
||||||
|
func IsCriticalPod(pod *v1.Pod) bool {
|
||||||
|
if IsStaticPod(pod) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if IsMirrorPod(pod) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
// A pod is critical when its priority is at least SystemCriticalPriority.
func IsCriticalPodBasedOnPriority(priority int32) bool {
	return priority >= SystemCriticalPriority
}
|
||||||
|
|
||||||
|
// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
// containers of the pod. If PodOverhead feature is enabled, pod overhead is added to the
// total container resource requests and to the total container limits which have a
// non-zero quantity.
func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
	reqs, limits = v1.ResourceList{}, v1.ResourceList{}
	// Regular containers run concurrently, so their resources are summed.
	for _, container := range pod.Spec.Containers {
		addResourceList(reqs, container.Resources.Requests)
		addResourceList(limits, container.Resources.Limits)
	}
	// init containers define the minimum of any resource
	// (they run sequentially, so only the largest value matters).
	for _, container := range pod.Spec.InitContainers {
		maxResourceList(reqs, container.Resources.Requests)
		maxResourceList(limits, container.Resources.Limits)
	}

	// if PodOverhead feature is supported, add overhead for running a pod
	// to the sum of requests and to non-zero limits:
	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(PodOverhead) {
		addResourceList(reqs, pod.Spec.Overhead)

		for name, quantity := range pod.Spec.Overhead {
			if value, ok := limits[name]; ok && !value.IsZero() {
				value.Add(quantity)
				limits[name] = value
			}
		}
	}

	return
}
|
||||||
|
|
||||||
|
// addResourceList adds the resources in newList to list
|
||||||
|
func addResourceList(list, newList v1.ResourceList) {
|
||||||
|
for name, quantity := range newList {
|
||||||
|
if value, ok := list[name]; !ok {
|
||||||
|
list[name] = quantity.DeepCopy()
|
||||||
|
} else {
|
||||||
|
value.Add(quantity)
|
||||||
|
list[name] = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// maxResourceList sets list to the greater of list/newList for every resource
|
||||||
|
// either list
|
||||||
|
func maxResourceList(list, new v1.ResourceList) {
|
||||||
|
for name, quantity := range new {
|
||||||
|
if value, ok := list[name]; !ok {
|
||||||
|
list[name] = quantity.DeepCopy()
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
if quantity.Cmp(value) > 0 {
|
||||||
|
list[name] = quantity.DeepCopy()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
128
pkg/utils/predicates.go
Normal file
128
pkg/utils/predicates.go
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/selection"
|
||||||
|
"k8s.io/klog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The following code has been copied from predicates package to avoid the
|
||||||
|
// huge vendoring issues, mostly copied from
|
||||||
|
// k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/
|
||||||
|
// Some minor changes have been made to ease the imports, but most of the code
|
||||||
|
// remains untouched
|
||||||
|
|
||||||
|
// PodMatchNodeSelector checks if a pod node selector matches the node label.
|
||||||
|
func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||||
|
if node == nil {
|
||||||
|
return false, fmt.Errorf("node not found")
|
||||||
|
}
|
||||||
|
if podMatchesNodeLabels(pod, node) {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector.
|
||||||
|
func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
|
||||||
|
// Check if node.Labels match pod.Spec.NodeSelector.
|
||||||
|
if len(pod.Spec.NodeSelector) > 0 {
|
||||||
|
selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
|
||||||
|
if !selector.Matches(labels.Set(node.Labels)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1. nil NodeSelector matches all nodes (i.e. does not filter out any nodes)
|
||||||
|
// 2. nil []NodeSelectorTerm (equivalent to non-nil empty NodeSelector) matches no nodes
|
||||||
|
// 3. zero-length non-nil []NodeSelectorTerm matches no nodes also, just for simplicity
|
||||||
|
// 4. nil []NodeSelectorRequirement (equivalent to non-nil empty NodeSelectorTerm) matches no nodes
|
||||||
|
// 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity
|
||||||
|
// 6. non-nil empty NodeSelectorRequirement is not allowed
|
||||||
|
|
||||||
|
affinity := pod.Spec.Affinity
|
||||||
|
if affinity != nil && affinity.NodeAffinity != nil {
|
||||||
|
nodeAffinity := affinity.NodeAffinity
|
||||||
|
// if no required NodeAffinity requirements, will do no-op, means select all nodes.
|
||||||
|
if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
|
||||||
|
if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||||
|
nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
|
||||||
|
klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
|
||||||
|
return nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
|
||||||
|
// terms are ORed, and an empty list of terms will match nothing.
|
||||||
|
func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
|
||||||
|
for _, req := range nodeSelectorTerms {
|
||||||
|
nodeSelector, err := NodeSelectorRequirementsAsSelector(req.MatchExpressions)
|
||||||
|
if err != nil {
|
||||||
|
klog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if nodeSelector.Matches(labels.Set(node.Labels)) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
// labels.Selector.
// An empty requirement list yields labels.Nothing() (matches no objects).
// An unrecognized operator produces an error.
func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) {
	if len(nsm) == 0 {
		return labels.Nothing(), nil
	}
	selector := labels.NewSelector()
	for _, expr := range nsm {
		// Map the API operator onto the labels-package selection operator.
		var op selection.Operator
		switch expr.Operator {
		case v1.NodeSelectorOpIn:
			op = selection.In
		case v1.NodeSelectorOpNotIn:
			op = selection.NotIn
		case v1.NodeSelectorOpExists:
			op = selection.Exists
		case v1.NodeSelectorOpDoesNotExist:
			op = selection.DoesNotExist
		case v1.NodeSelectorOpGt:
			op = selection.GreaterThan
		case v1.NodeSelectorOpLt:
			op = selection.LessThan
		default:
			return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
		}
		r, err := labels.NewRequirement(expr.Key, op, expr.Values)
		if err != nil {
			return nil, err
		}
		selector = selector.Add(*r)
	}
	return selector, nil
}
|
||||||
35
pkg/utils/priority.go
Normal file
35
pkg/utils/priority.go
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
|
)
|
||||||
|
|
||||||
|
const SystemCriticalPriority = 2 * int32(1000000000)
|
||||||
|
|
||||||
|
// GetNamespacesFromPodAffinityTerm returns a set of names
|
||||||
|
// according to the namespaces indicated in podAffinityTerm.
|
||||||
|
// If namespaces is empty it considers the given pod's namespace.
|
||||||
|
func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.String {
|
||||||
|
names := sets.String{}
|
||||||
|
if len(podAffinityTerm.Namespaces) == 0 {
|
||||||
|
names.Insert(pod.Namespace)
|
||||||
|
} else {
|
||||||
|
names.Insert(podAffinityTerm.Namespaces...)
|
||||||
|
}
|
||||||
|
return names
|
||||||
|
}
|
||||||
|
|
||||||
|
// PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
|
||||||
|
// matches the namespace and selector defined by <affinityPod>`s <term>.
|
||||||
|
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, selector labels.Selector) bool {
|
||||||
|
if !namespaces.Has(pod.Namespace) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !selector.Matches(labels.Set(pod.Labels)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
85
pkg/utils/qos.go
Normal file
85
pkg/utils/qos.go
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
|
)
|
||||||
|
|
||||||
|
var supportedQoSComputeResources = sets.NewString(string(v1.ResourceCPU), string(v1.ResourceMemory))
|
||||||
|
|
||||||
|
// QOSList is a set of (resource name, QoS class) pairs.
|
||||||
|
type QOSList map[v1.ResourceName]v1.PodQOSClass
|
||||||
|
|
||||||
|
// isSupportedQoSComputeResource reports whether the resource participates in
// QoS classification (only CPU and memory do).
func isSupportedQoSComputeResource(name v1.ResourceName) bool {
	return supportedQoSComputeResources.Has(string(name))
}
|
||||||
|
|
||||||
|
// GetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
	requests := v1.ResourceList{}
	limits := v1.ResourceList{}
	zeroQuantity := resource.MustParse("0")
	isGuaranteed := true
	// Regular and init containers are inspected alike for classification.
	allContainers := []v1.Container{}
	allContainers = append(allContainers, pod.Spec.Containers...)
	allContainers = append(allContainers, pod.Spec.InitContainers...)
	for _, container := range allContainers {
		// process requests: accumulate each positive cpu/memory request.
		for name, quantity := range container.Resources.Requests {
			if !isSupportedQoSComputeResource(name) {
				continue
			}
			if quantity.Cmp(zeroQuantity) == 1 {
				delta := quantity.DeepCopy()
				if _, exists := requests[name]; !exists {
					requests[name] = delta
				} else {
					delta.Add(requests[name])
					requests[name] = delta
				}
			}
		}
		// process limits: accumulate positive limits and record which
		// resources this container limited.
		qosLimitsFound := sets.NewString()
		for name, quantity := range container.Resources.Limits {
			if !isSupportedQoSComputeResource(name) {
				continue
			}
			if quantity.Cmp(zeroQuantity) == 1 {
				qosLimitsFound.Insert(string(name))
				delta := quantity.DeepCopy()
				if _, exists := limits[name]; !exists {
					limits[name] = delta
				} else {
					delta.Add(limits[name])
					limits[name] = delta
				}
			}
		}

		// Guaranteed requires every container to limit both memory and CPU.
		if !qosLimitsFound.HasAll(string(v1.ResourceMemory), string(v1.ResourceCPU)) {
			isGuaranteed = false
		}
	}
	if len(requests) == 0 && len(limits) == 0 {
		return v1.PodQOSBestEffort
	}
	// Check is requests match limits for all resources.
	if isGuaranteed {
		for name, req := range requests {
			if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
				isGuaranteed = false
				break
			}
		}
	}
	if isGuaranteed &&
		len(requests) == len(limits) {
		return v1.PodQOSGuaranteed
	}
	return v1.PodQOSBurstable
}
|
||||||
@@ -15,3 +15,21 @@ limitations under the License.
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
package utils
|
package utils
|
||||||
|
|
||||||
|
import v1 "k8s.io/api/core/v1"
|
||||||
|
|
||||||
|
// This file contains the datastructures, types & functions needed by all the strategies so that we don't have
|
||||||
|
// to compute them again in each strategy.
|
||||||
|
|
||||||
|
// NodePodEvictedCount keeps count of pods evicted on node. This is used in conjunction with strategies to
|
||||||
|
type NodePodEvictedCount map[*v1.Node]int
|
||||||
|
|
||||||
|
// InitializeNodePodCount initializes the nodePodCount.
|
||||||
|
func InitializeNodePodCount(nodeList []*v1.Node) NodePodEvictedCount {
|
||||||
|
var nodePodCount = make(NodePodEvictedCount)
|
||||||
|
for _, node := range nodeList {
|
||||||
|
// Initialize podsEvicted till now with 0.
|
||||||
|
nodePodCount[node] = 0
|
||||||
|
}
|
||||||
|
return nodePodCount
|
||||||
|
}
|
||||||
|
|||||||
249
test/e2e/e2e_test.go
Normal file
249
test/e2e/e2e_test.go
Normal file
@@ -0,0 +1,249 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package e2e
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/klog"
|
||||||
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||||
|
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||||
|
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||||
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
func MakePodSpec() v1.PodSpec {
|
||||||
|
return v1.PodSpec{
|
||||||
|
Containers: []v1.Container{{
|
||||||
|
Name: "pause",
|
||||||
|
ImagePullPolicy: "Never",
|
||||||
|
Image: "kubernetes/pause",
|
||||||
|
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||||
|
Resources: v1.ResourceRequirements{
|
||||||
|
Limits: v1.ResourceList{
|
||||||
|
v1.ResourceCPU: resource.MustParse("100m"),
|
||||||
|
v1.ResourceMemory: resource.MustParse("1000Mi"),
|
||||||
|
},
|
||||||
|
Requests: v1.ResourceList{
|
||||||
|
v1.ResourceCPU: resource.MustParse("100m"),
|
||||||
|
v1.ResourceMemory: resource.MustParse("800Mi"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RcByNameContainer returns a ReplicationControoler with specified name and container
|
||||||
|
func RcByNameContainer(name string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
|
||||||
|
|
||||||
|
zeroGracePeriod := int64(0)
|
||||||
|
|
||||||
|
// Add "name": name to the labels, overwriting if it exists.
|
||||||
|
labels["name"] = name
|
||||||
|
if gracePeriod == nil {
|
||||||
|
gracePeriod = &zeroGracePeriod
|
||||||
|
}
|
||||||
|
return &v1.ReplicationController{
|
||||||
|
TypeMeta: metav1.TypeMeta{
|
||||||
|
Kind: "ReplicationController",
|
||||||
|
APIVersion: "v1",
|
||||||
|
},
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: name,
|
||||||
|
},
|
||||||
|
Spec: v1.ReplicationControllerSpec{
|
||||||
|
Replicas: func(i int32) *int32 { return &i }(replicas),
|
||||||
|
Selector: map[string]string{
|
||||||
|
"name": name,
|
||||||
|
},
|
||||||
|
Template: &v1.PodTemplateSpec{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: labels,
|
||||||
|
},
|
||||||
|
Spec: MakePodSpec(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// startEndToEndForLowNodeUtilization tests the lownode utilization strategy.
// It builds under/over-utilization thresholds, runs the LowNodeUtilization
// strategy once against the ready nodes, then sleeps to let evictions settle.
// Any setup failure terminates the process via klog.Fatalf.
func startEndToEndForLowNodeUtilization(clientset clientset.Interface) {
	// Nodes below all `thresholds` are under-utilized; nodes above any
	// `targetThresholds` are over-utilized.
	var thresholds = make(deschedulerapi.ResourceThresholds)
	var targetThresholds = make(deschedulerapi.ResourceThresholds)
	thresholds[v1.ResourceMemory] = 20
	thresholds[v1.ResourcePods] = 20
	thresholds[v1.ResourceCPU] = 85
	targetThresholds[v1.ResourceMemory] = 20
	targetThresholds[v1.ResourcePods] = 20
	targetThresholds[v1.ResourceCPU] = 90
	// Run descheduler.
	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
	if err != nil || len(evictionPolicyGroupVersion) == 0 {
		klog.Fatalf("%v", err)
	}
	stopChannel := make(chan struct{})
	nodes, err := nodeutil.ReadyNodes(clientset, "", stopChannel)
	if err != nil {
		klog.Fatalf("%v", err)
	}
	nodeUtilizationThresholds := deschedulerapi.NodeResourceUtilizationThresholds{Thresholds: thresholds, TargetThresholds: targetThresholds}
	nodeUtilizationStrategyParams := deschedulerapi.StrategyParameters{NodeResourceUtilizationThresholds: nodeUtilizationThresholds}
	lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{Enabled: true, Params: nodeUtilizationStrategyParams}
	ds := &options.DeschedulerServer{Client: clientset}
	nodePodCount := utils.InitializeNodePodCount(nodes)
	strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, evictionPolicyGroupVersion, nodes, nodePodCount)
	// Give the cluster time to process the evictions before callers inspect it.
	time.Sleep(10 * time.Second)
}
|
||||||
|
|
||||||
|
func TestE2E(t *testing.T) {
|
||||||
|
// If we have reached here, it means cluster would have been already setup and the kubeconfig file should
|
||||||
|
// be in /tmp directory as admin.conf.
|
||||||
|
clientSet, err := client.CreateClient("/tmp/admin.conf")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error during client creation with %v", err)
|
||||||
|
}
|
||||||
|
nodeList, err := clientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error listing node with %v", err)
|
||||||
|
}
|
||||||
|
// Assumption: We would have 3 node cluster by now. Kubeadm brings all the master components onto master node.
|
||||||
|
// So, the last node would have least utilization.
|
||||||
|
rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
|
||||||
|
_, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error creating deployment %v", err)
|
||||||
|
}
|
||||||
|
evictPods(t, clientSet, nodeList, rc)
|
||||||
|
|
||||||
|
rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
rc.Spec.Replicas = func(i int32) *int32 { return &i }(15)
|
||||||
|
rc.Spec.Template.Spec.Volumes = []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "sample",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||||
|
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error creating deployment %v", err)
|
||||||
|
}
|
||||||
|
evictPods(t, clientSet, nodeList, rc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeschedulingInterval(t *testing.T) {
|
||||||
|
clientSet, err := client.CreateClient("/tmp/admin.conf")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error during client creation with %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// By default, the DeschedulingInterval param should be set to 0, meaning Descheduler only runs once then exits
|
||||||
|
s := options.NewDeschedulerServer()
|
||||||
|
s.Client = clientSet
|
||||||
|
|
||||||
|
deschedulerPolicy := &api.DeschedulerPolicy{}
|
||||||
|
|
||||||
|
c := make(chan bool)
|
||||||
|
go func() {
|
||||||
|
err := descheduler.RunDeschedulerStrategies(s, deschedulerPolicy)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error running descheduler strategies: %+v", err)
|
||||||
|
}
|
||||||
|
c <- true
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-c:
|
||||||
|
// successfully returned
|
||||||
|
case <-time.After(3 * time.Minute):
|
||||||
|
t.Errorf("descheduler.Run timed out even without descheduling-interval set")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func evictPods(t *testing.T, clientSet clientset.Interface, nodeList *v1.NodeList, rc *v1.ReplicationController) {
|
||||||
|
var leastLoadedNode v1.Node
|
||||||
|
podsBefore := math.MaxInt16
|
||||||
|
for i := range nodeList.Items {
|
||||||
|
// Skip the Master Node
|
||||||
|
if _, exist := nodeList.Items[i].Labels["node-role.kubernetes.io/master"]; exist {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// List all the pods on the current Node
|
||||||
|
podsOnANode, err := podutil.ListEvictablePodsOnNode(clientSet, &nodeList.Items[i], true)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error listing pods on a node %v", err)
|
||||||
|
}
|
||||||
|
// Update leastLoadedNode if necessary
|
||||||
|
if tmpLoads := len(podsOnANode); tmpLoads < podsBefore {
|
||||||
|
leastLoadedNode = nodeList.Items[i]
|
||||||
|
podsBefore = tmpLoads
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.Log("Eviction of pods starting")
|
||||||
|
startEndToEndForLowNodeUtilization(clientSet)
|
||||||
|
podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(clientSet, &leastLoadedNode, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error listing pods on a node %v", err)
|
||||||
|
}
|
||||||
|
podsAfter := len(podsOnleastUtilizedNode)
|
||||||
|
if podsBefore > podsAfter {
|
||||||
|
t.Fatalf("We should have see more pods on this node as per kubeadm's way of installing %v, %v", podsBefore, podsAfter)
|
||||||
|
}
|
||||||
|
|
||||||
|
//set number of replicas to 0
|
||||||
|
rc.Spec.Replicas = func(i int32) *int32 { return &i }(0)
|
||||||
|
_, err = clientSet.CoreV1().ReplicationControllers("default").Update(rc)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error updating replica controller %v", err)
|
||||||
|
}
|
||||||
|
allPodsDeleted := false
|
||||||
|
//wait 30 seconds until all pods are deleted
|
||||||
|
for i := 0; i < 6; i++ {
|
||||||
|
scale, _ := clientSet.CoreV1().ReplicationControllers("default").GetScale(rc.Name, metav1.GetOptions{})
|
||||||
|
if scale.Spec.Replicas == 0 {
|
||||||
|
allPodsDeleted = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !allPodsDeleted {
|
||||||
|
t.Errorf("Deleting of rc pods took too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
err = clientSet.CoreV1().ReplicationControllers("default").Delete(rc.Name, &metav1.DeleteOptions{})
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error deleting rc %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
//wait until rc is deleted
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
}
|
||||||
19
test/run-e2e-tests.sh
Executable file
19
test/run-e2e-tests.sh
Executable file
@@ -0,0 +1,19 @@
|
|||||||
|
# Copyright 2017 The Kubernetes Authors.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# This just run e2e tests.
|
||||||
|
PRJ_PREFIX="sigs.k8s.io/descheduler"
|
||||||
|
go test ${PRJ_PREFIX}/test/e2e/ -v
|
||||||
@@ -14,6 +14,6 @@
|
|||||||
|
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
# run unit tests
|
# This just run unit-tests. Ignoring the current directory so as to avoid running e2e tests.
|
||||||
go test $(go list github.com/kubernetes-incubator/descheduler/... | grep -v github.com/kubernetes-incubator/descheduler/vendor/)
|
PRJ_PREFIX="sigs.k8s.io/descheduler"
|
||||||
|
go test $(go list ${PRJ_PREFIX}/... | grep -v ${PRJ_PREFIX}/vendor/| grep -v ${PRJ_PREFIX}/test/)
|
||||||
|
|||||||
@@ -19,14 +19,12 @@ package test
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO:@ravisantoshgudimetla. As of now building some test pods here. This needs to
|
// BuildTestPod creates a test pod with given parameters.
|
||||||
// move to utils after refactor.
|
|
||||||
// buildTestPod creates a test pod with given parameters.
|
|
||||||
func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod {
|
func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod {
|
||||||
pod := &v1.Pod{
|
pod := &v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
@@ -39,6 +37,7 @@ func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod
|
|||||||
{
|
{
|
||||||
Resources: v1.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: v1.ResourceList{},
|
Requests: v1.ResourceList{},
|
||||||
|
Limits: v1.ResourceList{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -64,33 +63,25 @@ func GetMirrorPodAnnotation() map[string]string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetNormalPodAnnotation returns the annotation needed for a pod.
|
// GetNormalPodOwnerRefList returns the ownerRef needed for a pod.
|
||||||
func GetNormalPodAnnotation() map[string]string {
|
func GetNormalPodOwnerRefList() []metav1.OwnerReference {
|
||||||
return map[string]string{
|
ownerRefList := make([]metav1.OwnerReference, 0)
|
||||||
"kubernetes.io/created-by": "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Pod\"}}",
|
ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "Pod", APIVersion: "v1"})
|
||||||
}
|
return ownerRefList
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetReplicaSetAnnotation returns the annotation needed for replicaset pod.
|
// GetReplicaSetOwnerRefList returns the ownerRef needed for replicaset pod.
|
||||||
func GetReplicaSetAnnotation() map[string]string {
|
func GetReplicaSetOwnerRefList() []metav1.OwnerReference {
|
||||||
return map[string]string{
|
ownerRefList := make([]metav1.OwnerReference, 0)
|
||||||
"kubernetes.io/created-by": "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"ReplicaSet\"}}",
|
ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-1"})
|
||||||
}
|
return ownerRefList
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDaemonSetAnnotation returns the annotation needed for daemonset pod.
|
// GetDaemonSetOwnerRefList returns the ownerRef needed for daemonset pod.
|
||||||
func GetDaemonSetAnnotation() map[string]string {
|
func GetDaemonSetOwnerRefList() []metav1.OwnerReference {
|
||||||
return map[string]string{
|
ownerRefList := make([]metav1.OwnerReference, 0)
|
||||||
"kubernetes.io/created-by": "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"DaemonSet\"}}",
|
ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "DaemonSet", APIVersion: "v1"})
|
||||||
}
|
return ownerRefList
|
||||||
}
|
|
||||||
|
|
||||||
// GetCriticalPodAnnotation returns the annotation needed for critical pod.
|
|
||||||
func GetCriticalPodAnnotation() map[string]string {
|
|
||||||
return map[string]string{
|
|
||||||
"kubernetes.io/created-by": "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Pod\"}}",
|
|
||||||
"scheduler.alpha.kubernetes.io/critical-pod": "",
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildTestNode creates a node with specified capacity.
|
// BuildTestNode creates a node with specified capacity.
|
||||||
|
|||||||
0
vendor/github.com/go-openapi/analysis/LICENSE → vendor/cloud.google.com/go/LICENSE
generated
vendored
0
vendor/github.com/go-openapi/analysis/LICENSE → vendor/cloud.google.com/go/LICENSE
generated
vendored
513
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
Normal file
513
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
Normal file
@@ -0,0 +1,513 @@
|
|||||||
|
// Copyright 2014 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package metadata provides access to Google Compute Engine (GCE)
|
||||||
|
// metadata and API service accounts.
|
||||||
|
//
|
||||||
|
// This package is a wrapper around the GCE metadata service,
|
||||||
|
// as documented at https://developers.google.com/compute/docs/metadata.
|
||||||
|
package metadata // import "cloud.google.com/go/compute/metadata"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// metadataIP is the documented metadata server IP address.
|
||||||
|
metadataIP = "169.254.169.254"
|
||||||
|
|
||||||
|
// metadataHostEnv is the environment variable specifying the
|
||||||
|
// GCE metadata hostname. If empty, the default value of
|
||||||
|
// metadataIP ("169.254.169.254") is used instead.
|
||||||
|
// This is variable name is not defined by any spec, as far as
|
||||||
|
// I know; it was made up for the Go package.
|
||||||
|
metadataHostEnv = "GCE_METADATA_HOST"
|
||||||
|
|
||||||
|
userAgent = "gcloud-golang/0.1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type cachedValue struct {
|
||||||
|
k string
|
||||||
|
trim bool
|
||||||
|
mu sync.Mutex
|
||||||
|
v string
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
projID = &cachedValue{k: "project/project-id", trim: true}
|
||||||
|
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
||||||
|
instID = &cachedValue{k: "instance/id", trim: true}
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultClient = &Client{hc: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 2 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
ResponseHeaderTimeout: 2 * time.Second,
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
subscribeClient = &Client{hc: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 2 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
)
|
||||||
|
|
||||||
|
// NotDefinedError is returned when requested metadata is not defined.
|
||||||
|
//
|
||||||
|
// The underlying string is the suffix after "/computeMetadata/v1/".
|
||||||
|
//
|
||||||
|
// This error is not returned if the value is defined to be the empty
|
||||||
|
// string.
|
||||||
|
type NotDefinedError string
|
||||||
|
|
||||||
|
func (suffix NotDefinedError) Error() string {
|
||||||
|
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cachedValue) get(cl *Client) (v string, err error) {
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
c.mu.Lock()
|
||||||
|
if c.v != "" {
|
||||||
|
return c.v, nil
|
||||||
|
}
|
||||||
|
if c.trim {
|
||||||
|
v, err = cl.getTrimmed(c.k)
|
||||||
|
} else {
|
||||||
|
v, err = cl.Get(c.k)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
c.v = v
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
onGCEOnce sync.Once
|
||||||
|
onGCE bool
|
||||||
|
)
|
||||||
|
|
||||||
|
// OnGCE reports whether this process is running on Google Compute Engine.
|
||||||
|
func OnGCE() bool {
|
||||||
|
onGCEOnce.Do(initOnGCE)
|
||||||
|
return onGCE
|
||||||
|
}
|
||||||
|
|
||||||
|
func initOnGCE() {
|
||||||
|
onGCE = testOnGCE()
|
||||||
|
}
|
||||||
|
|
||||||
|
func testOnGCE() bool {
|
||||||
|
// The user explicitly said they're on GCE, so trust them.
|
||||||
|
if os.Getenv(metadataHostEnv) != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
resc := make(chan bool, 2)
|
||||||
|
|
||||||
|
// Try two strategies in parallel.
|
||||||
|
// See https://github.com/googleapis/google-cloud-go/issues/194
|
||||||
|
go func() {
|
||||||
|
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
||||||
|
req.Header.Set("User-Agent", userAgent)
|
||||||
|
res, err := defaultClient.hc.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
resc <- false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
resc <- res.Header.Get("Metadata-Flavor") == "Google"
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
addrs, err := net.LookupHost("metadata.google.internal")
|
||||||
|
if err != nil || len(addrs) == 0 {
|
||||||
|
resc <- false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resc <- strsContains(addrs, metadataIP)
|
||||||
|
}()
|
||||||
|
|
||||||
|
tryHarder := systemInfoSuggestsGCE()
|
||||||
|
if tryHarder {
|
||||||
|
res := <-resc
|
||||||
|
if res {
|
||||||
|
// The first strategy succeeded, so let's use it.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// Wait for either the DNS or metadata server probe to
|
||||||
|
// contradict the other one and say we are running on
|
||||||
|
// GCE. Give it a lot of time to do so, since the system
|
||||||
|
// info already suggests we're running on a GCE BIOS.
|
||||||
|
timer := time.NewTimer(5 * time.Second)
|
||||||
|
defer timer.Stop()
|
||||||
|
select {
|
||||||
|
case res = <-resc:
|
||||||
|
return res
|
||||||
|
case <-timer.C:
|
||||||
|
// Too slow. Who knows what this system is.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// There's no hint from the system info that we're running on
|
||||||
|
// GCE, so use the first probe's result as truth, whether it's
|
||||||
|
// true or false. The goal here is to optimize for speed for
|
||||||
|
// users who are NOT running on GCE. We can't assume that
|
||||||
|
// either a DNS lookup or an HTTP request to a blackholed IP
|
||||||
|
// address is fast. Worst case this should return when the
|
||||||
|
// metaClient's Transport.ResponseHeaderTimeout or
|
||||||
|
// Transport.Dial.Timeout fires (in two seconds).
|
||||||
|
return <-resc
|
||||||
|
}
|
||||||
|
|
||||||
|
// systemInfoSuggestsGCE reports whether the local system (without
|
||||||
|
// doing network requests) suggests that we're running on GCE. If this
|
||||||
|
// returns true, testOnGCE tries a bit harder to reach its metadata
|
||||||
|
// server.
|
||||||
|
func systemInfoSuggestsGCE() bool {
|
||||||
|
if runtime.GOOS != "linux" {
|
||||||
|
// We don't have any non-Linux clues available, at least yet.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
||||||
|
name := strings.TrimSpace(string(slurp))
|
||||||
|
return name == "Google" || name == "Google Compute Engine"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
|
||||||
|
// ResponseHeaderTimeout).
|
||||||
|
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||||
|
return subscribeClient.Subscribe(suffix, fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get calls Client.Get on the default client.
|
||||||
|
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
|
||||||
|
|
||||||
|
// ProjectID returns the current instance's project ID string.
|
||||||
|
func ProjectID() (string, error) { return defaultClient.ProjectID() }
|
||||||
|
|
||||||
|
// NumericProjectID returns the current instance's numeric project ID.
|
||||||
|
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
|
||||||
|
|
||||||
|
// InternalIP returns the instance's primary internal IP address.
|
||||||
|
func InternalIP() (string, error) { return defaultClient.InternalIP() }
|
||||||
|
|
||||||
|
// ExternalIP returns the instance's primary external (public) IP address.
|
||||||
|
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
|
||||||
|
|
||||||
|
// Hostname returns the instance's hostname. This will be of the form
|
||||||
|
// "<instanceID>.c.<projID>.internal".
|
||||||
|
func Hostname() (string, error) { return defaultClient.Hostname() }
|
||||||
|
|
||||||
|
// InstanceTags returns the list of user-defined instance tags,
|
||||||
|
// assigned when initially creating a GCE instance.
|
||||||
|
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
|
||||||
|
|
||||||
|
// InstanceID returns the current VM's numeric instance ID.
|
||||||
|
func InstanceID() (string, error) { return defaultClient.InstanceID() }
|
||||||
|
|
||||||
|
// InstanceName returns the current VM's instance ID string.
|
||||||
|
func InstanceName() (string, error) { return defaultClient.InstanceName() }
|
||||||
|
|
||||||
|
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||||
|
func Zone() (string, error) { return defaultClient.Zone() }
|
||||||
|
|
||||||
|
// InstanceAttributes calls Client.InstanceAttributes on the default client.
|
||||||
|
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
|
||||||
|
|
||||||
|
// ProjectAttributes calls Client.ProjectAttributes on the default client.
|
||||||
|
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
|
||||||
|
|
||||||
|
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
|
||||||
|
func InstanceAttributeValue(attr string) (string, error) {
|
||||||
|
return defaultClient.InstanceAttributeValue(attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
|
||||||
|
func ProjectAttributeValue(attr string) (string, error) {
|
||||||
|
return defaultClient.ProjectAttributeValue(attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scopes calls Client.Scopes on the default client.
|
||||||
|
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
|
||||||
|
|
||||||
|
func strsContains(ss []string, s string) bool {
|
||||||
|
for _, v := range ss {
|
||||||
|
if v == s {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Client provides metadata.
|
||||||
|
type Client struct {
|
||||||
|
hc *http.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
|
||||||
|
// will use the given http.Client instead of the default client.
|
||||||
|
func NewClient(c *http.Client) *Client {
|
||||||
|
return &Client{hc: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getETag returns a value from the metadata service as well as the associated ETag.
|
||||||
|
// This func is otherwise equivalent to Get.
|
||||||
|
func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
||||||
|
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
||||||
|
// a container, which is an important use-case for local testing of cloud
|
||||||
|
// deployments. To enable spoofing of the metadata service, the environment
|
||||||
|
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
||||||
|
// requests shall go.
|
||||||
|
host := os.Getenv(metadataHostEnv)
|
||||||
|
if host == "" {
|
||||||
|
// Using 169.254.169.254 instead of "metadata" here because Go
|
||||||
|
// binaries built with the "netgo" tag and without cgo won't
|
||||||
|
// know the search suffix for "metadata" is
|
||||||
|
// ".google.internal", and this IP address is documented as
|
||||||
|
// being stable anyway.
|
||||||
|
host = metadataIP
|
||||||
|
}
|
||||||
|
u := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||||
|
req, _ := http.NewRequest("GET", u, nil)
|
||||||
|
req.Header.Set("Metadata-Flavor", "Google")
|
||||||
|
req.Header.Set("User-Agent", userAgent)
|
||||||
|
res, err := c.hc.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode == http.StatusNotFound {
|
||||||
|
return "", "", NotDefinedError(suffix)
|
||||||
|
}
|
||||||
|
all, err := ioutil.ReadAll(res.Body)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
if res.StatusCode != 200 {
|
||||||
|
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
|
||||||
|
}
|
||||||
|
return string(all), res.Header.Get("Etag"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns a value from the metadata service.
|
||||||
|
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||||
|
//
|
||||||
|
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
||||||
|
// 169.254.169.254 will be used instead.
|
||||||
|
//
|
||||||
|
// If the requested metadata is not defined, the returned error will
|
||||||
|
// be of type NotDefinedError.
|
||||||
|
func (c *Client) Get(suffix string) (string, error) {
|
||||||
|
val, _, err := c.getETag(suffix)
|
||||||
|
return val, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) getTrimmed(suffix string) (s string, err error) {
|
||||||
|
s, err = c.Get(suffix)
|
||||||
|
s = strings.TrimSpace(s)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) lines(suffix string) ([]string, error) {
|
||||||
|
j, err := c.Get(suffix)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
s := strings.Split(strings.TrimSpace(j), "\n")
|
||||||
|
for i := range s {
|
||||||
|
s[i] = strings.TrimSpace(s[i])
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectID returns the current instance's project ID string.
|
||||||
|
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
|
||||||
|
|
||||||
|
// NumericProjectID returns the current instance's numeric project ID.
|
||||||
|
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
|
||||||
|
|
||||||
|
// InstanceID returns the current VM's numeric instance ID.
|
||||||
|
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
|
||||||
|
|
||||||
|
// InternalIP returns the instance's primary internal IP address.
|
||||||
|
func (c *Client) InternalIP() (string, error) {
|
||||||
|
return c.getTrimmed("instance/network-interfaces/0/ip")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExternalIP returns the instance's primary external (public) IP address.
|
||||||
|
func (c *Client) ExternalIP() (string, error) {
|
||||||
|
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hostname returns the instance's hostname. This will be of the form
|
||||||
|
// "<instanceID>.c.<projID>.internal".
|
||||||
|
func (c *Client) Hostname() (string, error) {
|
||||||
|
return c.getTrimmed("instance/hostname")
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceTags returns the list of user-defined instance tags,
|
||||||
|
// assigned when initially creating a GCE instance.
|
||||||
|
func (c *Client) InstanceTags() ([]string, error) {
|
||||||
|
var s []string
|
||||||
|
j, err := c.Get("instance/tags")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceName returns the current VM's instance ID string.
|
||||||
|
func (c *Client) InstanceName() (string, error) {
|
||||||
|
host, err := c.Hostname()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return strings.Split(host, ".")[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||||
|
func (c *Client) Zone() (string, error) {
|
||||||
|
zone, err := c.getTrimmed("instance/zone")
|
||||||
|
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return zone[strings.LastIndex(zone, "/")+1:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceAttributes returns the list of user-defined attributes,
|
||||||
|
// assigned when initially creating a GCE VM instance. The value of an
|
||||||
|
// attribute can be obtained with InstanceAttributeValue.
|
||||||
|
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
|
||||||
|
|
||||||
|
// ProjectAttributes returns the list of user-defined attributes
|
||||||
|
// applying to the project as a whole, not just this VM. The value of
|
||||||
|
// an attribute can be obtained with ProjectAttributeValue.
|
||||||
|
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
|
||||||
|
|
||||||
|
// InstanceAttributeValue returns the value of the provided VM
|
||||||
|
// instance attribute.
|
||||||
|
//
|
||||||
|
// If the requested attribute is not defined, the returned error will
|
||||||
|
// be of type NotDefinedError.
|
||||||
|
//
|
||||||
|
// InstanceAttributeValue may return ("", nil) if the attribute was
|
||||||
|
// defined to be the empty string.
|
||||||
|
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
|
||||||
|
return c.Get("instance/attributes/" + attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectAttributeValue returns the value of the provided
|
||||||
|
// project attribute.
|
||||||
|
//
|
||||||
|
// If the requested attribute is not defined, the returned error will
|
||||||
|
// be of type NotDefinedError.
|
||||||
|
//
|
||||||
|
// ProjectAttributeValue may return ("", nil) if the attribute was
|
||||||
|
// defined to be the empty string.
|
||||||
|
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
|
||||||
|
return c.Get("project/attributes/" + attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scopes returns the service account scopes for the given account.
|
||||||
|
// The account may be empty or the string "default" to use the instance's
|
||||||
|
// main account.
|
||||||
|
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
|
||||||
|
if serviceAccount == "" {
|
||||||
|
serviceAccount = "default"
|
||||||
|
}
|
||||||
|
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
	// Back-off delay applied before retrying a failed hanging GET.
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := c.getETag(suffix)
	if err != nil {
		return err
	}

	// Deliver the initial value before entering the wait loop.
	if err := fn(val, true); err != nil {
		return err
	}

	// ok tracks whether the metadata value still exists; it flips to false
	// once the server reports the value as deleted (NotDefinedError).
	ok := true
	// Turn subsequent requests into hanging GETs: the server holds the
	// request open until the value's etag differs from last_etag. Append
	// with '&' or '?' depending on whether the suffix already has a query.
	if strings.ContainsRune(suffix, '?') {
		suffix += "&wait_for_change=true&last_etag="
	} else {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
		if err != nil {
			// Only NotDefinedError means the value was deleted; any
			// other error is treated as transient, so sleep and retry
			// without invoking fn.
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
				continue // Retry on other errors.
			}
			ok = false
		}
		// Remember the latest etag so the next hanging GET waits for a
		// further change rather than returning immediately.
		lastETag = etag

		// On deletion (!ok) this returns fn's error, which may be nil.
		if err := fn(val, ok); err != nil || !ok {
			return err
		}
	}
}
|
||||||
|
|
||||||
|
// Error contains an error response from the server.
type Error struct {
	// Code is the HTTP response status code.
	Code int
	// Message is the server response message.
	Message string
}

// Error implements the error interface by rendering the HTTP status
// code and the server's message.
func (e *Error) Error() string {
	msg := fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
	return msg
}
|
||||||
191
vendor/github.com/Azure/go-autorest/autorest/LICENSE
generated
vendored
Normal file
191
vendor/github.com/Azure/go-autorest/autorest/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2015 Microsoft Corporation
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
191
vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
generated
vendored
Normal file
191
vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2015 Microsoft Corporation
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
292
vendor/github.com/Azure/go-autorest/autorest/adal/README.md
generated
vendored
Normal file
292
vendor/github.com/Azure/go-autorest/autorest/adal/README.md
generated
vendored
Normal file
@@ -0,0 +1,292 @@
|
|||||||
|
# Azure Active Directory authentication for Go
|
||||||
|
|
||||||
|
This is a standalone package for authenticating with Azure Active
|
||||||
|
Directory from other Go libraries and applications, in particular the [Azure SDK
|
||||||
|
for Go](https://github.com/Azure/azure-sdk-for-go).
|
||||||
|
|
||||||
|
Note: Despite the package's name it is not related to other "ADAL" libraries
|
||||||
|
maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
|
||||||
|
should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
|
||||||
|
or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
|
||||||
|
trackers.
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go get -u github.com/Azure/go-autorest/autorest/adal
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
|
||||||
|
|
||||||
|
### Register an Azure AD Application with secret
|
||||||
|
|
||||||
|
|
||||||
|
1. Register a new application with a `secret` credential
|
||||||
|
|
||||||
|
```
|
||||||
|
az ad app create \
|
||||||
|
--display-name example-app \
|
||||||
|
--homepage https://example-app/home \
|
||||||
|
--identifier-uris https://example-app/app \
|
||||||
|
--password secret
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Create a service principal using the `Application ID` from previous step
|
||||||
|
|
||||||
|
```
|
||||||
|
az ad sp create --id "Application ID"
|
||||||
|
```
|
||||||
|
|
||||||
|
* Replace `Application ID` with `appId` from step 1.
|
||||||
|
|
||||||
|
### Register an Azure AD Application with certificate
|
||||||
|
|
||||||
|
1. Create a private key
|
||||||
|
|
||||||
|
```
|
||||||
|
openssl genrsa -out "example-app.key" 2048
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Create the certificate
|
||||||
|
|
||||||
|
```
|
||||||
|
openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
|
||||||
|
openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Create the PKCS12 version of the certificate containing also the private key
|
||||||
|
|
||||||
|
```
|
||||||
|
openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Register a new application with the certificate content form `example-app.crt`
|
||||||
|
|
||||||
|
```
|
||||||
|
certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
|
||||||
|
|
||||||
|
az ad app create \
|
||||||
|
--display-name example-app \
|
||||||
|
--homepage https://example-app/home \
|
||||||
|
--identifier-uris https://example-app/app \
|
||||||
|
--key-usage Verify --end-date 2018-01-01 \
|
||||||
|
--key-value "${certificateContents}"
|
||||||
|
```
|
||||||
|
|
||||||
|
5. Create a service principal using the `Application ID` from previous step
|
||||||
|
|
||||||
|
```
|
||||||
|
az ad sp create --id "APPLICATION_ID"
|
||||||
|
```
|
||||||
|
|
||||||
|
* Replace `APPLICATION_ID` with `appId` from step 4.
|
||||||
|
|
||||||
|
|
||||||
|
### Grant the necessary permissions
|
||||||
|
|
||||||
|
Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
|
||||||
|
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
|
||||||
|
which can be assigned to a service principal of an Azure AD application depending of your needs.
|
||||||
|
|
||||||
|
```
|
||||||
|
az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
|
||||||
|
```
|
||||||
|
|
||||||
|
* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step.
|
||||||
|
* Replace the `ROLE_NAME` with a role name of your choice.
|
||||||
|
|
||||||
|
It is also possible to define custom role definitions.
|
||||||
|
|
||||||
|
```
|
||||||
|
az role definition create --role-definition role-definition.json
|
||||||
|
```
|
||||||
|
|
||||||
|
* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file.
|
||||||
|
|
||||||
|
|
||||||
|
### Acquire Access Token
|
||||||
|
|
||||||
|
The common configuration used by all flows:
|
||||||
|
|
||||||
|
```Go
|
||||||
|
const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
|
||||||
|
tenantID := "TENANT_ID"
|
||||||
|
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
|
||||||
|
|
||||||
|
applicationID := "APPLICATION_ID"
|
||||||
|
|
||||||
|
callback := func(token adal.Token) error {
|
||||||
|
// This is called after the token is acquired
|
||||||
|
}
|
||||||
|
|
||||||
|
// The resource for which the token is acquired
|
||||||
|
resource := "https://management.core.windows.net/"
|
||||||
|
```
|
||||||
|
|
||||||
|
* Replace the `TENANT_ID` with your tenant ID.
|
||||||
|
* Replace the `APPLICATION_ID` with the value from previous section.
|
||||||
|
|
||||||
|
#### Client Credentials
|
||||||
|
|
||||||
|
```Go
|
||||||
|
applicationSecret := "APPLICATION_SECRET"
|
||||||
|
|
||||||
|
spt, err := adal.NewServicePrincipalToken(
|
||||||
|
*oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
applicationSecret,
|
||||||
|
resource,
|
||||||
|
callbacks...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Acquire a new access token
|
||||||
|
err = spt.Refresh()
|
||||||
|
if (err == nil) {
|
||||||
|
token := spt.Token
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
* Replace the `APPLICATION_SECRET` with the `password` value from previous section.
|
||||||
|
|
||||||
|
#### Client Certificate
|
||||||
|
|
||||||
|
```Go
|
||||||
|
certificatePath := "./example-app.pfx"
|
||||||
|
|
||||||
|
certData, err := ioutil.ReadFile(certificatePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the certificate and private key from pfx file
|
||||||
|
certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
spt, err := adal.NewServicePrincipalTokenFromCertificate(
|
||||||
|
*oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
certificate,
|
||||||
|
rsaPrivateKey,
|
||||||
|
resource,
|
||||||
|
callbacks...)
|
||||||
|
|
||||||
|
// Acquire a new access token
|
||||||
|
err = spt.Refresh()
|
||||||
|
if (err == nil) {
|
||||||
|
token := spt.Token
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
* Update the certificate path to point to the example-app.pfx file which was created in previous section.
|
||||||
|
|
||||||
|
|
||||||
|
#### Device Code
|
||||||
|
|
||||||
|
```Go
|
||||||
|
oauthClient := &http.Client{}
|
||||||
|
|
||||||
|
// Acquire the device code
|
||||||
|
deviceCode, err := adal.InitiateDeviceAuth(
|
||||||
|
oauthClient,
|
||||||
|
*oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
resource)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display the authentication message
|
||||||
|
fmt.Println(*deviceCode.Message)
|
||||||
|
|
||||||
|
// Wait here until the user is authenticated
|
||||||
|
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
spt, err := adal.NewServicePrincipalTokenFromManualToken(
|
||||||
|
*oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
resource,
|
||||||
|
*token,
|
||||||
|
callbacks...)
|
||||||
|
|
||||||
|
if (err == nil) {
|
||||||
|
token := spt.Token
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Username password authenticate
|
||||||
|
|
||||||
|
```Go
|
||||||
|
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
|
||||||
|
*oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
username,
|
||||||
|
password,
|
||||||
|
resource,
|
||||||
|
callbacks...)
|
||||||
|
|
||||||
|
if (err == nil) {
|
||||||
|
token := spt.Token
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Authorization code authenticate
|
||||||
|
|
||||||
|
``` Go
|
||||||
|
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
|
||||||
|
*oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
clientSecret,
|
||||||
|
authorizationCode,
|
||||||
|
redirectURI,
|
||||||
|
resource,
|
||||||
|
callbacks...)
|
||||||
|
|
||||||
|
err = spt.Refresh()
|
||||||
|
if (err == nil) {
|
||||||
|
token := spt.Token
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Command Line Tool
|
||||||
|
|
||||||
|
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
|
||||||
|
|
||||||
|
```
|
||||||
|
adal -h
|
||||||
|
|
||||||
|
Usage of ./adal:
|
||||||
|
-applicationId string
|
||||||
|
application id
|
||||||
|
-certificatePath string
|
||||||
|
      	path to pk12/PFX application certificate
|
||||||
|
-mode string
|
||||||
|
authentication mode (device, secret, cert, refresh) (default "device")
|
||||||
|
-resource string
|
||||||
|
resource for which the token is requested
|
||||||
|
-secret string
|
||||||
|
application secret
|
||||||
|
-tenantId string
|
||||||
|
tenant id
|
||||||
|
-tokenCachePath string
|
||||||
|
      	location of oauth token cache (default "/home/cgc/.adal/accessToken.json")
|
||||||
|
```
|
||||||
|
|
||||||
|
Example acquire a token for `https://management.core.windows.net/` using device code flow:
|
||||||
|
|
||||||
|
```
|
||||||
|
adal -mode device \
|
||||||
|
-applicationId "APPLICATION_ID" \
|
||||||
|
-tenantId "TENANT_ID" \
|
||||||
|
-resource https://management.core.windows.net/
|
||||||
|
|
||||||
|
```
|
||||||
151
vendor/github.com/Azure/go-autorest/autorest/adal/config.go
generated
vendored
Normal file
151
vendor/github.com/Azure/go-autorest/autorest/adal/config.go
generated
vendored
Normal file
@@ -0,0 +1,151 @@
|
|||||||
|
package adal
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// activeDirectoryEndpointTemplate builds tenant-relative OAuth2 endpoint
	// references: "{tenantID}/oauth2/{operation}{optional ?api-version=... suffix}".
	activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
)
|
||||||
|
|
||||||
|
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
	// AuthorityEndpoint is the tenant-specific base URL (AD endpoint + tenant ID).
	AuthorityEndpoint url.URL `json:"authorityEndpoint"`
	// AuthorizeEndpoint is the OAuth2 authorization endpoint.
	AuthorizeEndpoint url.URL `json:"authorizeEndpoint"`
	// TokenEndpoint is the OAuth2 token-exchange endpoint.
	TokenEndpoint url.URL `json:"tokenEndpoint"`
	// DeviceCodeEndpoint is the device-authorization endpoint used by the device flow.
	DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
}
|
||||||
|
|
||||||
|
// IsZero returns true if the OAuthConfig object is zero-initialized.
|
||||||
|
func (oac OAuthConfig) IsZero() bool {
|
||||||
|
return oac == OAuthConfig{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateStringParam returns an error when param is empty; name identifies
// the offending parameter in the error message.
func validateStringParam(param, name string) error {
	if len(param) == 0 {
		// Use a format verb instead of concatenating name into the format
		// string: a '%' in name would otherwise corrupt the message
		// (and go vet flags non-constant format strings).
		return fmt.Errorf("parameter '%s' cannot be empty", name)
	}
	return nil
}
|
||||||
|
|
||||||
|
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
|
||||||
|
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
|
||||||
|
apiVer := "1.0"
|
||||||
|
return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
	if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
		return nil, err
	}
	api := ""
	// it's legal for tenantID to be empty so don't validate it
	if apiVersion != nil {
		if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
			return nil, err
		}
		// NOTE(review): the "?api-version=" prefix is added here, so callers
		// must pass the bare version (e.g. "1.0"), never an already-prefixed
		// query string — confirm all callers respect this.
		api = fmt.Sprintf("?api-version=%s", *apiVersion)
	}
	// The AD endpoint is the base URL; the tenant-specific endpoints below are
	// resolved against it as relative references via u.Parse.
	u, err := url.Parse(activeDirectoryEndpoint)
	if err != nil {
		return nil, err
	}
	authorityURL, err := u.Parse(tenantID)
	if err != nil {
		return nil, err
	}
	// Endpoint references follow "{tenant}/oauth2/{operation}{api}".
	authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
	if err != nil {
		return nil, err
	}
	tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
	if err != nil {
		return nil, err
	}
	deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
	if err != nil {
		return nil, err
	}

	return &OAuthConfig{
		AuthorityEndpoint:  *authorityURL,
		AuthorizeEndpoint:  *authorizeURL,
		TokenEndpoint:      *tokenURL,
		DeviceCodeEndpoint: *deviceCodeURL,
	}, nil
}
|
||||||
|
|
||||||
|
// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
type MultiTenantOAuthConfig interface {
	// PrimaryTenant returns the OAuthConfig for the primary tenant.
	PrimaryTenant() *OAuthConfig
	// AuxiliaryTenants returns the OAuthConfigs for the auxiliary tenants.
	AuxiliaryTenants() []*OAuthConfig
}
|
||||||
|
|
||||||
|
// OAuthOptions contains optional OAuthConfig creation arguments.
type OAuthOptions struct {
	// APIVersion, when non-empty, is the value used for the "api-version"
	// query parameter on the generated endpoint URLs.
	APIVersion string
}

// apiVersion returns the bare API version to use, defaulting to "1.0".
// The result is handed to NewOAuthConfigWithAPIVersion, which adds the
// "?api-version=" prefix itself; returning the value pre-prefixed (as this
// method previously did for non-empty APIVersion) produced a doubled
// "?api-version=?api-version=..." query string.
func (c OAuthOptions) apiVersion() string {
	if c.APIVersion != "" {
		return c.APIVersion
	}
	return "1.0"
}
|
||||||
|
|
||||||
|
// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration.
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
	// The auxiliary-tenant mechanism supports between one and three extra tenants.
	if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
		return nil, errors.New("must specify one to three auxiliary tenants")
	}
	// Slot 0 holds the primary tenant; slots 1..n hold the auxiliary tenants.
	mtCfg := multiTenantOAuthConfig{
		cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
	}
	apiVer := options.apiVersion()
	pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
	if err != nil {
		return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
	}
	mtCfg.cfgs[0] = pri
	for i := range auxiliaryTenantIDs {
		// NOTE(review): auxiliary tenants are built with NewOAuthConfig, i.e.
		// always API version "1.0", while the primary tenant honors
		// options.APIVersion — confirm this asymmetry is intentional.
		aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
		if err != nil {
			return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
		}
		mtCfg.cfgs[i+1] = aux
	}
	return mtCfg, nil
}
|
||||||
|
|
||||||
|
// multiTenantOAuthConfig is the concrete implementation of MultiTenantOAuthConfig.
type multiTenantOAuthConfig struct {
	// first config in the slice is the primary tenant
	cfgs []*OAuthConfig
}

// PrimaryTenant returns the OAuthConfig for the primary tenant (slot 0).
func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
	return m.cfgs[0]
}

// AuxiliaryTenants returns the OAuthConfigs for the auxiliary tenants (slots 1..n).
func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
	return m.cfgs[1:]
}
|
||||||
242
vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
generated
vendored
Normal file
242
vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
generated
vendored
Normal file
@@ -0,0 +1,242 @@
|
|||||||
|
package adal
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
/*
|
||||||
|
This file is largely based on rjw57/oauth2device's code, with the follow differences:
|
||||||
|
* scope -> resource, and only allow a single one
|
||||||
|
* receive "Message" in the DeviceCode struct and show it to users as the prompt
|
||||||
|
* azure-xplat-cli has the following behavior that this emulates:
|
||||||
|
- does not send client_secret during the token exchange
|
||||||
|
- sends resource again in the token exchange request
|
||||||
|
*/
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// logPrefix tags every error message produced by this file.
	logPrefix = "autorest/adal/devicetoken:"
)
|
||||||
|
|
||||||
|
var (
	// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
	ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)

	// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
	ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)

	// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
	ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)

	// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
	ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)

	// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
	ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)

	// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
	ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)

	// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
	ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)

	// Message fragments combined with logPrefix when wrapping transport and
	// decoding failures in InitiateDeviceAuth and CheckForUserCompletion.
	errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
	errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
	errTokenSendingFails  = "Error occurred while sending request with device code for a token"
	errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
	errStatusNotOK        = "Error HTTP status != 200"
)
|
||||||
|
|
||||||
|
// DeviceCode is the object returned by the device auth endpoint
// It contains information to instruct the user to complete the auth flow
type DeviceCode struct {
	// The fields below mirror the device-authorization response payload.
	DeviceCode      *string `json:"device_code,omitempty"`
	UserCode        *string `json:"user_code,omitempty"`
	VerificationURL *string `json:"verification_url,omitempty"`
	// ExpiresIn and Interval are decoded from JSON string values (",string").
	ExpiresIn *int64 `json:"expires_in,string,omitempty"`
	Interval  *int64 `json:"interval,string,omitempty"`

	Message *string `json:"message"` // Azure specific
	// The fields below are not part of the wire payload: they are stored by
	// InitiateDeviceAuth and reused by CheckForUserCompletion when exchanging
	// the device code for a token.
	Resource    string
	OAuthConfig OAuthConfig
	ClientID    string
}
|
||||||
|
|
||||||
|
// TokenError is the object returned by the token exchange endpoint
// when something is amiss
type TokenError struct {
	// Error is the OAuth error code (e.g. "authorization_pending", "slow_down");
	// CheckForUserCompletion maps it onto this package's sentinel errors.
	Error            *string `json:"error,omitempty"`
	ErrorCodes       []int   `json:"error_codes,omitempty"`
	ErrorDescription *string `json:"error_description,omitempty"`
	Timestamp        *string `json:"timestamp,omitempty"`
	TraceID          *string `json:"trace_id,omitempty"`
}
|
||||||
|
|
||||||
|
// deviceToken is the object returned by the token exchange endpoint
// It can either look like a Token or an ErrorToken, so put both here
// and check for presence of "Error" to know if we are in error state
type deviceToken struct {
	Token
	TokenError
}
|
||||||
|
|
||||||
|
// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
	// Form-encoded request body: only client_id and resource are sent
	// (no client_secret — see the file comment about azure-xplat-cli behavior).
	v := url.Values{
		"client_id": []string{clientID},
		"resource":  []string{resource},
	}

	s := v.Encode()
	body := ioutil.NopCloser(strings.NewReader(s))

	req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
	}

	req.ContentLength = int64(len(s))
	req.Header.Set(contentType, mimeTypeFormPost)
	resp, err := sender.Do(req)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
	}
	defer resp.Body.Close()

	rb, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
	}

	// Unlike the token exchange, any non-200 response here is fatal.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
	}

	// A blank (space-only) body gets its own sentinel error.
	if len(strings.Trim(string(rb), " ")) == 0 {
		return nil, ErrDeviceCodeEmpty
	}

	var code DeviceCode
	err = json.Unmarshal(rb, &code)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
	}

	// Remember the request context on the DeviceCode so that
	// CheckForUserCompletion can reuse it for the token exchange.
	code.ClientID = clientID
	code.Resource = resource
	code.OAuthConfig = oauthConfig

	return &code, nil
}
|
||||||
|
|
||||||
|
// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
// to see if the device flow has: been completed, timed out, or otherwise failed
func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
	// Exchange request: the resource is sent again and no client_secret is
	// included (see the file comment about azure-xplat-cli behavior).
	v := url.Values{
		"client_id":  []string{code.ClientID},
		"code":       []string{*code.DeviceCode},
		"grant_type": []string{OAuthGrantTypeDeviceCode},
		"resource":   []string{code.Resource},
	}

	s := v.Encode()
	body := ioutil.NopCloser(strings.NewReader(s))

	req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
	}

	req.ContentLength = int64(len(s))
	req.Header.Set(contentType, mimeTypeFormPost)
	resp, err := sender.Do(req)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
	}
	defer resp.Body.Close()

	rb, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
	}

	// A non-200 status is only fatal when the body is empty: while the flow is
	// pending the endpoint answers non-200 WITH a JSON error payload
	// ("authorization_pending" etc.), which is decoded and mapped below —
	// hence the deliberate && rather than ||.
	if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
	}
	if len(strings.Trim(string(rb), " ")) == 0 {
		return nil, ErrOAuthTokenEmpty
	}

	// deviceToken embeds both Token and TokenError; the Error field decides
	// which half of the response is meaningful.
	var token deviceToken
	err = json.Unmarshal(rb, &token)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
	}

	// No "error" field means the user completed authentication: we have a token.
	if token.Error == nil {
		return &token.Token, nil
	}

	// Map the OAuth error code onto this package's sentinel errors.
	switch *token.Error {
	case "authorization_pending":
		return nil, ErrDeviceAuthorizationPending
	case "slow_down":
		return nil, ErrDeviceSlowDown
	case "access_denied":
		return nil, ErrDeviceAccessDenied
	case "code_expired":
		return nil, ErrDeviceCodeExpired
	default:
		return nil, ErrDeviceGeneric
	}
}
|
||||||
|
|
||||||
|
// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
	// NOTE(review): code.Interval is dereferenced unconditionally — a
	// DeviceCode whose response omitted "interval" would panic here; confirm
	// callers always obtain the code via InitiateDeviceAuth with a populated value.
	intervalDuration := time.Duration(*code.Interval) * time.Second
	waitDuration := intervalDuration

	for {
		token, err := CheckForUserCompletion(sender, code)

		if err == nil {
			return token, nil
		}

		switch err {
		case ErrDeviceSlowDown:
			// Server asked us to back off: double the wait.
			waitDuration += waitDuration
		case ErrDeviceAuthorizationPending:
			// noop
		default: // everything else is "fatal" to us
			return nil, err
		}

		// Give up once backoff has grown beyond three times the server's
		// polling interval (i.e. after repeated slow_down responses).
		if waitDuration > (intervalDuration * 3) {
			return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
		}

		time.Sleep(waitDuration)
	}
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user