mirror of
https://github.com/kubernetes-sigs/descheduler.git
synced 2026-01-26 13:29:11 +01:00
Compare commits
235 Commits
chart-0.18
...
release-1.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c86d1c7eb2 | ||
|
|
f67c265533 | ||
|
|
969921640c | ||
|
|
0273fd7597 | ||
|
|
e84d0c5587 | ||
|
|
5267ec407c | ||
|
|
6714d8e0b7 | ||
|
|
b3439eab41 | ||
|
|
509118587a | ||
|
|
f482537dff | ||
|
|
0f95817746 | ||
|
|
70d1fadae7 | ||
|
|
499beb2fd7 | ||
|
|
de24f3854b | ||
|
|
a5e8ba1a70 | ||
|
|
45ad48042f | ||
|
|
550de966c7 | ||
|
|
c94342db31 | ||
|
|
94f1c7dd8d | ||
|
|
bf91e6790e | ||
|
|
251f44e568 | ||
|
|
922c4f6a63 | ||
|
|
2b5ec01381 | ||
|
|
7bcd562ff5 | ||
|
|
03852d0914 | ||
|
|
5f1e9a97c4 | ||
|
|
cd6f2cd4cb | ||
|
|
e679c7fabc | ||
|
|
6f5918d765 | ||
|
|
5a46ba0630 | ||
|
|
c1323719f4 | ||
|
|
8795fe6b90 | ||
|
|
a3f8bb0369 | ||
|
|
cd3c3bf4da | ||
|
|
652ee87bf5 | ||
|
|
5225ec4597 | ||
|
|
b2af720ddb | ||
|
|
94f07996f7 | ||
|
|
4839d5f369 | ||
|
|
022e07c278 | ||
|
|
620d71abdf | ||
|
|
85d00ab457 | ||
|
|
b30bd40860 | ||
|
|
4108362158 | ||
|
|
b27dc5f14e | ||
|
|
3c54378749 | ||
|
|
6240aa68f7 | ||
|
|
301af7fd9c | ||
|
|
41d529ebe2 | ||
|
|
cc6bb633ba | ||
|
|
31cf70c34c | ||
|
|
3399619395 | ||
|
|
cfc4cce08b | ||
|
|
f9e9f0654a | ||
|
|
73af0e84fa | ||
|
|
b33928ac91 | ||
|
|
3ac0c408de | ||
|
|
149f900811 | ||
|
|
9ede04ba9b | ||
|
|
f9cbed8b71 | ||
|
|
fa4da031e4 | ||
|
|
9511f308d0 | ||
|
|
52f43f0fcb | ||
|
|
4bb0ceeed5 | ||
|
|
279f648e9a | ||
|
|
411ec740ff | ||
|
|
6237ba5a43 | ||
|
|
5d65a9ad68 | ||
|
|
28f3f867c3 | ||
|
|
00f79aa28d | ||
|
|
6042d717e9 | ||
|
|
7afa54519f | ||
|
|
8c3a80fbf9 | ||
|
|
11b9829885 | ||
|
|
4798559545 | ||
|
|
8b34d6eb94 | ||
|
|
70700a1c97 | ||
|
|
d7420eb945 | ||
|
|
c9cfeb35c2 | ||
|
|
fda63a816f | ||
|
|
6329b6c27b | ||
|
|
9b4f781c5c | ||
|
|
63039fcfd6 | ||
|
|
d25f3757d6 | ||
|
|
1303fe6eb9 | ||
|
|
1682cc9462 | ||
|
|
605927676f | ||
|
|
dc41e6a41c | ||
|
|
e37c27313e | ||
|
|
e5d9756ebe | ||
|
|
e6f1c6f78a | ||
|
|
fceebded6d | ||
|
|
08b2dffa42 | ||
|
|
745e29959c | ||
|
|
aa1bab2c4a | ||
|
|
19c3e02b44 | ||
|
|
a45057200f | ||
|
|
74d6be3943 | ||
|
|
1fb3445692 | ||
|
|
195082d33b | ||
|
|
03dbc93961 | ||
|
|
d27f64480b | ||
|
|
5645663b71 | ||
|
|
dbc8092282 | ||
|
|
50d2b246d9 | ||
|
|
6220aca03e | ||
|
|
674993d23a | ||
|
|
f4c3f9b18f | ||
|
|
d65a7c4783 | ||
|
|
89541f7545 | ||
|
|
17f769c1c1 | ||
|
|
eb9e62f047 | ||
|
|
6ccd80f2ee | ||
|
|
d8251b9086 | ||
|
|
4cd1f45d90 | ||
|
|
2dc3f53a13 | ||
|
|
f5d8a02f79 | ||
|
|
a7c51ffae0 | ||
|
|
9746fd300f | ||
|
|
b5e17f91cd | ||
|
|
f5524153ba | ||
|
|
6d693d06fb | ||
|
|
4d7a6ee9be | ||
|
|
0fdaac6042 | ||
|
|
bb7ab369d7 | ||
|
|
d96cca2221 | ||
|
|
6ee87d9d7c | ||
|
|
95ce2a4ff7 | ||
|
|
19e1387bf1 | ||
|
|
ec4c5bed5d | ||
|
|
ae38aa63af | ||
|
|
cdcd677aa0 | ||
|
|
a5eb9fc36d | ||
|
|
96efd2312b | ||
|
|
e7699c4f6b | ||
|
|
d0fbebb77c | ||
|
|
46bb5b6f55 | ||
|
|
b799ed074a | ||
|
|
eee41ee111 | ||
|
|
6ac81e0b9c | ||
|
|
5970899029 | ||
|
|
5bb0389538 | ||
|
|
92cb6a378a | ||
|
|
b09932e92a | ||
|
|
63603f38d6 | ||
|
|
42db31683f | ||
|
|
c40a9c397f | ||
|
|
4014ebad92 | ||
|
|
bb7cb05571 | ||
|
|
30b2bd5d9f | ||
|
|
8d5ab05aa0 | ||
|
|
db501da34d | ||
|
|
8d60370612 | ||
|
|
052f011288 | ||
|
|
6e23579bd0 | ||
|
|
7331f4e5de | ||
|
|
11f1333af7 | ||
|
|
74f70fdbc9 | ||
|
|
0006fb039d | ||
|
|
0894f7740c | ||
|
|
c3f07dc366 | ||
|
|
ca8f1051eb | ||
|
|
c7692a2e9f | ||
|
|
53badf7b61 | ||
|
|
f801c5f72f | ||
|
|
327880ba51 | ||
|
|
7bb8b4feda | ||
|
|
36b1e1f061 | ||
|
|
4507a90bb6 | ||
|
|
550f68306c | ||
|
|
2b668566ce | ||
|
|
ee414ea366 | ||
|
|
5761b5d595 | ||
|
|
07f476dfc4 | ||
|
|
f5e9f07321 | ||
|
|
05c69ee26a | ||
|
|
1623e09122 | ||
|
|
696aa7c505 | ||
|
|
c53dce0805 | ||
|
|
cd8b5a0354 | ||
|
|
b51f24eb8e | ||
|
|
ae3b4368ee | ||
|
|
61eef93618 | ||
|
|
fa0a2ec6fe | ||
|
|
7ece10a643 | ||
|
|
71c8eae47e | ||
|
|
267b0837dc | ||
|
|
c713537d56 | ||
|
|
e374229707 | ||
|
|
f834581a8e | ||
|
|
15fcde5229 | ||
|
|
96c5dd3941 | ||
|
|
65a03e76bf | ||
|
|
43525f6493 | ||
|
|
7457626f62 | ||
|
|
08c22e8921 | ||
|
|
4ff533ec17 | ||
|
|
7680e3d079 | ||
|
|
305801dd0e | ||
|
|
9951b85d60 | ||
|
|
ff21ec9432 | ||
|
|
5e15d77bf2 | ||
|
|
d833c73fc4 | ||
|
|
795a80dfb0 | ||
|
|
f5e4acdd8a | ||
|
|
eb4c1bb355 | ||
|
|
6dfa95cc87 | ||
|
|
eb9d974a8b | ||
|
|
d41a1f4a56 | ||
|
|
138ad556a3 | ||
|
|
37124e6e45 | ||
|
|
bd412bf87f | ||
|
|
6f9b31f568 | ||
|
|
c858740c4f | ||
|
|
bfefe634a1 | ||
|
|
7b7b9e1cd7 | ||
|
|
0a4b8b0a25 | ||
|
|
f28183dcbe | ||
|
|
abdf79454f | ||
|
|
46b570b71d | ||
|
|
616a9b5f6b | ||
|
|
003a4cdc2b | ||
|
|
54ea05d8bb | ||
|
|
d0eea0cabb | ||
|
|
f0297dfe03 | ||
|
|
ef1f36f8e4 | ||
|
|
a733c95dcc | ||
|
|
5a81a0661b | ||
|
|
83e04960af | ||
|
|
2a8dc69cbb | ||
|
|
5d82d08af3 | ||
|
|
c265825166 | ||
|
|
ed0126fb63 | ||
|
|
8c7267b379 | ||
|
|
435674fb44 |
46
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
46
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
---
|
||||||
|
name: Bug report
|
||||||
|
about: Create a bug report to help improve descheduler
|
||||||
|
title: ''
|
||||||
|
labels: 'kind/bug'
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- Please answer these questions before submitting your bug report. Thanks! -->
|
||||||
|
|
||||||
|
**What version of descheduler are you using?**
|
||||||
|
|
||||||
|
descheduler version:
|
||||||
|
|
||||||
|
|
||||||
|
**Does this issue reproduce with the latest release?**
|
||||||
|
|
||||||
|
|
||||||
|
**Which descheduler CLI options are you using?**
|
||||||
|
|
||||||
|
|
||||||
|
**Please provide a copy of your descheduler policy config file**
|
||||||
|
|
||||||
|
|
||||||
|
**What k8s version are you using (`kubectl version`)?**
|
||||||
|
|
||||||
|
<details><summary><code>kubectl version</code> Output</summary><br><pre>
|
||||||
|
$ kubectl version
|
||||||
|
|
||||||
|
</pre></details>
|
||||||
|
|
||||||
|
|
||||||
|
**What did you do?**
|
||||||
|
|
||||||
|
<!--
|
||||||
|
If possible, provide a recipe for reproducing the error.
|
||||||
|
A detailed sequence of steps describing what to do to observe the issue is good.
|
||||||
|
A complete runnable bash shell script is best.
|
||||||
|
-->
|
||||||
|
|
||||||
|
|
||||||
|
**What did you expect to see?**
|
||||||
|
|
||||||
|
|
||||||
|
**What did you see instead?**
|
||||||
26
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
26
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
---
|
||||||
|
name: Feature request
|
||||||
|
about: Suggest an idea for descheduler
|
||||||
|
title: ''
|
||||||
|
labels: 'kind/feature'
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- Please answer these questions before submitting your feature request. Thanks! -->
|
||||||
|
|
||||||
|
**Is your feature request related to a problem? Please describe.**
|
||||||
|
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
|
||||||
|
|
||||||
|
**Describe the solution you'd like**
|
||||||
|
<!-- A clear and concise description of what you want to happen. -->
|
||||||
|
|
||||||
|
**Describe alternatives you've considered**
|
||||||
|
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
|
||||||
|
|
||||||
|
**What version of descheduler are you using?**
|
||||||
|
|
||||||
|
descheduler version:
|
||||||
|
|
||||||
|
**Additional context**
|
||||||
|
<!-- Add any other context or screenshots about the feature request here. -->
|
||||||
18
.github/ISSUE_TEMPLATE/misc_request.md
vendored
Normal file
18
.github/ISSUE_TEMPLATE/misc_request.md
vendored
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
---
|
||||||
|
name: Miscellaneous
|
||||||
|
about: Not a bug and not a feature
|
||||||
|
title: ''
|
||||||
|
labels: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
<!--
|
||||||
|
Please do not use this to submit a bug report or feature request. Use the
|
||||||
|
bug report or feature request options instead.
|
||||||
|
|
||||||
|
Also, please consider posting in the Kubernetes Slack #sig-scheduling channel
|
||||||
|
instead of opening an issue if this is a support request.
|
||||||
|
|
||||||
|
Thanks!
|
||||||
|
-->
|
||||||
31
.github/workflows/release.yaml
vendored
Normal file
31
.github/workflows/release.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
name: Release Charts
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- release-*
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
release:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Configure Git
|
||||||
|
run: |
|
||||||
|
git config user.name "$GITHUB_ACTOR"
|
||||||
|
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
|
||||||
|
|
||||||
|
- name: Install Helm
|
||||||
|
uses: azure/setup-helm@v1
|
||||||
|
with:
|
||||||
|
version: v3.4.0
|
||||||
|
|
||||||
|
- name: Run chart-releaser
|
||||||
|
uses: helm/chart-releaser-action@v1.1.0
|
||||||
|
env:
|
||||||
|
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
|
||||||
|
CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"
|
||||||
33
.travis.yml
33
.travis.yml
@@ -1,33 +0,0 @@
|
|||||||
sudo: false
|
|
||||||
|
|
||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.13.x
|
|
||||||
env:
|
|
||||||
- K8S_VERSION=v1.18.2
|
|
||||||
- K8S_VERSION=v1.17.5
|
|
||||||
- K8S_VERSION=v1.16.9
|
|
||||||
- K8S_VERSION=v1.15.11
|
|
||||||
services:
|
|
||||||
- docker
|
|
||||||
before_script:
|
|
||||||
- curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
|
|
||||||
- wget https://github.com/kubernetes-sigs/kind/releases/download/v0.8.1/kind-linux-amd64
|
|
||||||
- chmod +x kind-linux-amd64
|
|
||||||
- mv kind-linux-amd64 kind
|
|
||||||
- export PATH=$PATH:$PWD
|
|
||||||
- kind create cluster --image kindest/node:${K8S_VERSION} --config=$TRAVIS_BUILD_DIR/hack/kind_config.yaml
|
|
||||||
- export KUBECONFIG="$(kind get kubeconfig-path)"
|
|
||||||
- docker pull kubernetes/pause
|
|
||||||
- kind load docker-image kubernetes/pause
|
|
||||||
- kind get kubeconfig > /tmp/admin.conf
|
|
||||||
script:
|
|
||||||
- mkdir -p ~/gopath/src/sigs.k8s.io/
|
|
||||||
- mv ~/gopath/src/github.com/kubernetes-sigs/descheduler ~/gopath/src/sigs.k8s.io/.
|
|
||||||
- hack/verify-gofmt.sh
|
|
||||||
- hack/verify-vendor.sh
|
|
||||||
- make lint
|
|
||||||
- make build
|
|
||||||
- make test-unit
|
|
||||||
- make test-e2e
|
|
||||||
@@ -11,11 +11,13 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
FROM golang:1.13.9
|
FROM golang:1.15.5
|
||||||
|
|
||||||
WORKDIR /go/src/sigs.k8s.io/descheduler
|
WORKDIR /go/src/sigs.k8s.io/descheduler
|
||||||
COPY . .
|
COPY . .
|
||||||
RUN make
|
ARG ARCH
|
||||||
|
ARG VERSION
|
||||||
|
RUN VERSION=${VERSION} make build.$ARCH
|
||||||
|
|
||||||
FROM scratch
|
FROM scratch
|
||||||
|
|
||||||
|
|||||||
66
Makefile
66
Makefile
@@ -14,16 +14,16 @@
|
|||||||
|
|
||||||
.PHONY: test
|
.PHONY: test
|
||||||
|
|
||||||
# VERSION is currently based on the last commit
|
# VERSION is based on a date stamp plus the last commit
|
||||||
VERSION?=$(shell git describe --tags)
|
VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags --match "v*")
|
||||||
COMMIT=$(shell git rev-parse HEAD)
|
|
||||||
BUILD=$(shell date +%FT%T%z)
|
BUILD=$(shell date +%FT%T%z)
|
||||||
LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app
|
LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app
|
||||||
|
ARCHS = amd64 arm64
|
||||||
|
|
||||||
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"
|
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD}"
|
||||||
|
|
||||||
GOLANGCI_VERSION := v1.15.0
|
GOLANGCI_VERSION := v1.30.0
|
||||||
HAS_GOLANGCI := $(shell which golangci-lint)
|
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint)
|
||||||
|
|
||||||
# REGISTRY is the container registry to push
|
# REGISTRY is the container registry to push
|
||||||
# into. The default is to push to the staging
|
# into. The default is to push to the staging
|
||||||
@@ -41,27 +41,62 @@ IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
|
|||||||
# In the future binaries can be uploaded to
|
# In the future binaries can be uploaded to
|
||||||
# GCS bucket gs://k8s-staging-descheduler.
|
# GCS bucket gs://k8s-staging-descheduler.
|
||||||
|
|
||||||
|
HAS_HELM := $(shell which helm)
|
||||||
|
|
||||||
all: build
|
all: build
|
||||||
|
|
||||||
build:
|
build:
|
||||||
CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
|
CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
|
||||||
|
|
||||||
|
build.amd64:
|
||||||
|
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
|
||||||
|
|
||||||
|
build.arm64:
|
||||||
|
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
|
||||||
|
|
||||||
dev-image: build
|
dev-image: build
|
||||||
docker build -f Dockerfile.dev -t $(IMAGE) .
|
docker build -f Dockerfile.dev -t $(IMAGE) .
|
||||||
|
|
||||||
image:
|
image:
|
||||||
docker build -t $(IMAGE) .
|
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .
|
||||||
|
|
||||||
push-container-to-gcloud: image
|
image.amd64:
|
||||||
|
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .
|
||||||
|
|
||||||
|
image.arm64:
|
||||||
|
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .
|
||||||
|
|
||||||
|
push: image
|
||||||
gcloud auth configure-docker
|
gcloud auth configure-docker
|
||||||
docker tag $(IMAGE) $(IMAGE_GCLOUD)
|
docker tag $(IMAGE) $(IMAGE_GCLOUD)
|
||||||
docker push $(IMAGE_GCLOUD)
|
docker push $(IMAGE_GCLOUD)
|
||||||
|
|
||||||
push: push-container-to-gcloud
|
push-all: image.amd64 image.arm64
|
||||||
|
gcloud auth configure-docker
|
||||||
|
for arch in $(ARCHS); do \
|
||||||
|
docker tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
|
||||||
|
docker push $(IMAGE_GCLOUD)-$${arch} ;\
|
||||||
|
done
|
||||||
|
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
|
||||||
|
for arch in $(ARCHS); do \
|
||||||
|
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
|
||||||
|
done
|
||||||
|
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(IMAGE_GCLOUD) ;\
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf _output
|
rm -rf _output
|
||||||
|
|
||||||
|
verify: verify-gofmt verify-vendor lint lint-chart verify-spelling
|
||||||
|
|
||||||
|
verify-spelling:
|
||||||
|
./hack/verify-spelling.sh
|
||||||
|
|
||||||
|
verify-gofmt:
|
||||||
|
./hack/verify-gofmt.sh
|
||||||
|
|
||||||
|
verify-vendor:
|
||||||
|
./hack/verify-vendor.sh
|
||||||
|
|
||||||
test-unit:
|
test-unit:
|
||||||
./test/run-unit-tests.sh
|
./test/run-unit-tests.sh
|
||||||
|
|
||||||
@@ -72,10 +107,15 @@ gen:
|
|||||||
./hack/update-generated-conversions.sh
|
./hack/update-generated-conversions.sh
|
||||||
./hack/update-generated-deep-copies.sh
|
./hack/update-generated-deep-copies.sh
|
||||||
./hack/update-generated-defaulters.sh
|
./hack/update-generated-defaulters.sh
|
||||||
#undo go mod changes caused by above.
|
|
||||||
go mod tidy
|
|
||||||
lint:
|
lint:
|
||||||
ifndef HAS_GOLANGCI
|
ifndef HAS_GOLANGCI
|
||||||
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin ${GOLANGCI_VERSION}
|
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
|
||||||
endif
|
endif
|
||||||
golangci-lint run
|
./_output/bin/golangci-lint run
|
||||||
|
|
||||||
|
lint-chart:
|
||||||
|
ifndef HAS_HELM
|
||||||
|
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
|
||||||
|
endif
|
||||||
|
helm lint ./charts/descheduler
|
||||||
|
|||||||
1
OWNERS
1
OWNERS
@@ -9,3 +9,4 @@ reviewers:
|
|||||||
- ravisantoshgudimetla
|
- ravisantoshgudimetla
|
||||||
- damemi
|
- damemi
|
||||||
- seanmalloy
|
- seanmalloy
|
||||||
|
- ingvagabund
|
||||||
|
|||||||
352
README.md
352
README.md
@@ -1,10 +1,8 @@
|
|||||||
[](https://travis-ci.org/kubernetes-sigs/descheduler)
|
|
||||||
[](https://goreportcard.com/report/sigs.k8s.io/descheduler)
|
[](https://goreportcard.com/report/sigs.k8s.io/descheduler)
|
||||||
|

|
||||||
|
|
||||||
# Descheduler for Kubernetes
|
# Descheduler for Kubernetes
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
|
Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
|
||||||
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
|
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
|
||||||
pod can or can not be scheduled, are guided by its configurable policy which comprises of set of
|
pod can or can not be scheduled, are guided by its configurable policy which comprises of set of
|
||||||
@@ -24,6 +22,35 @@ Descheduler, based on its policy, finds pods that can be moved and evicts them.
|
|||||||
note, in current implementation, descheduler does not schedule replacement of evicted pods
|
note, in current implementation, descheduler does not schedule replacement of evicted pods
|
||||||
but relies on the default scheduler for that.
|
but relies on the default scheduler for that.
|
||||||
|
|
||||||
|
Table of Contents
|
||||||
|
=================
|
||||||
|
|
||||||
|
* [Quick Start](#quick-start)
|
||||||
|
* [Run As A Job](#run-as-a-job)
|
||||||
|
* [Run As A CronJob](#run-as-a-cronjob)
|
||||||
|
* [Install Using Helm](#install-using-helm)
|
||||||
|
* [Install Using Kustomize](#install-using-kustomize)
|
||||||
|
* [User Guide](#user-guide)
|
||||||
|
* [Policy and Strategies](#policy-and-strategies)
|
||||||
|
* [RemoveDuplicates](#removeduplicates)
|
||||||
|
* [LowNodeUtilization](#lownodeutilization)
|
||||||
|
* [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
|
||||||
|
* [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
|
||||||
|
* [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
|
||||||
|
* [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
|
||||||
|
* [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
|
||||||
|
* [PodLifeTime](#podlifetime)
|
||||||
|
* [Filter Pods](#filter-pods)
|
||||||
|
* [Namespace filtering](#namespace-filtering)
|
||||||
|
* [Priority filtering](#priority-filtering)
|
||||||
|
* [Pod Evictions](#pod-evictions)
|
||||||
|
* [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
|
||||||
|
* [Compatibility Matrix](#compatibility-matrix)
|
||||||
|
* [Getting Involved and Contributing](#getting-involved-and-contributing)
|
||||||
|
* [Communicating With Contributors](#communicating-with-contributors)
|
||||||
|
* [Roadmap](#roadmap)
|
||||||
|
* [Code of conduct](#code-of-conduct)
|
||||||
|
|
||||||
## Quick Start
|
## Quick Start
|
||||||
|
|
||||||
The descheduler can be run as a Job or CronJob inside of a k8s cluster. It has the
|
The descheduler can be run as a Job or CronJob inside of a k8s cluster. It has the
|
||||||
@@ -34,17 +61,39 @@ being evicted by itself or by the kubelet.
|
|||||||
### Run As A Job
|
### Run As A Job
|
||||||
|
|
||||||
```
|
```
|
||||||
kubectl create -f kubernetes/rbac.yaml
|
kubectl create -f kubernetes/base/rbac.yaml
|
||||||
kubectl create -f kubernetes/configmap.yaml
|
kubectl create -f kubernetes/base/configmap.yaml
|
||||||
kubectl create -f kubernetes/job.yaml
|
kubectl create -f kubernetes/job/job.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### Run As A CronJob
|
### Run As A CronJob
|
||||||
|
|
||||||
```
|
```
|
||||||
kubectl create -f kubernetes/rbac.yaml
|
kubectl create -f kubernetes/base/rbac.yaml
|
||||||
kubectl create -f kubernetes/configmap.yaml
|
kubectl create -f kubernetes/base/configmap.yaml
|
||||||
kubectl create -f kubernetes/cronjob.yaml
|
kubectl create -f kubernetes/cronjob/cronjob.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
### Install Using Helm
|
||||||
|
|
||||||
|
Starting with release v0.18.0 there is an official helm chart that can be used to install the
|
||||||
|
descheduler. See the [helm chart README](https://github.com/kubernetes-sigs/descheduler/blob/master/charts/descheduler/README.md) for detailed instructions.
|
||||||
|
|
||||||
|
The descheduler helm chart is also listed on the [artifact hub](https://artifacthub.io/packages/helm/descheduler/descheduler).
|
||||||
|
|
||||||
|
### Install Using Kustomize
|
||||||
|
|
||||||
|
You can use kustomize to install descheduler.
|
||||||
|
See the [resources | Kustomize](https://kubernetes-sigs.github.io/kustomize/api-reference/kustomization/resources/) for detailed instructions.
|
||||||
|
|
||||||
|
Run As A Job
|
||||||
|
```
|
||||||
|
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=master' | kubectl apply -f -
|
||||||
|
```
|
||||||
|
|
||||||
|
Run As A CronJob
|
||||||
|
```
|
||||||
|
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=master' | kubectl apply -f -
|
||||||
```
|
```
|
||||||
|
|
||||||
## User Guide
|
## User Guide
|
||||||
@@ -54,10 +103,25 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.
|
|||||||
## Policy and Strategies
|
## Policy and Strategies
|
||||||
|
|
||||||
Descheduler's policy is configurable and includes strategies that can be enabled or disabled.
|
Descheduler's policy is configurable and includes strategies that can be enabled or disabled.
|
||||||
Seven strategies `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`,
|
Eight strategies `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`,
|
||||||
`RemovePodsViolatingNodeAffinity`, `RemovePodsViolatingNodeTaints`, `RemovePodsHavingTooManyRestarts`, and `PodLifeTime`
|
`RemovePodsViolatingNodeAffinity`, `RemovePodsViolatingNodeTaints`, `RemovePodsViolatingTopologySpreadConstraint`,
|
||||||
are currently implemented. As part of the policy, the parameters associated with the strategies can be configured too.
|
`RemovePodsHavingTooManyRestarts`, and `PodLifeTime` are currently implemented. As part of the policy, the
|
||||||
By default, all strategies are enabled.
|
parameters associated with the strategies can be configured too. By default, all strategies are enabled.
|
||||||
|
|
||||||
|
The policy also includes common configuration for all the strategies:
|
||||||
|
- `nodeSelector` - limiting the nodes which are processed
|
||||||
|
- `evictLocalStoragePods` - allowing to evict pods with local storage
|
||||||
|
- `maxNoOfPodsToEvictPerNode` - maximum number of pods evicted from each node (summed through all strategies)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
nodeSelector: prod=dev
|
||||||
|
evictLocalStoragePods: true
|
||||||
|
maxNoOfPodsToEvictPerNode: 40
|
||||||
|
strategies:
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
### RemoveDuplicates
|
### RemoveDuplicates
|
||||||
|
|
||||||
@@ -71,7 +135,17 @@ are ready again, this strategy could be enabled to evict those duplicate pods.
|
|||||||
It provides one optional parameter, `ExcludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
|
It provides one optional parameter, `ExcludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
|
||||||
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
|
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
|
||||||
|
|
||||||
```
|
**Parameters:**
|
||||||
|
|
||||||
|
|Name|Type|
|
||||||
|
|---|---|
|
||||||
|
|`excludeOwnerKinds`|list(string)|
|
||||||
|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||||
|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||||
|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```yaml
|
||||||
apiVersion: "descheduler/v1alpha1"
|
apiVersion: "descheduler/v1alpha1"
|
||||||
kind: "DeschedulerPolicy"
|
kind: "DeschedulerPolicy"
|
||||||
strategies:
|
strategies:
|
||||||
@@ -95,14 +169,26 @@ usage is below threshold for all (cpu, memory, and number of pods), the node is
|
|||||||
Currently, pods request resource requirements are considered for computing node resource utilization.
|
Currently, pods request resource requirements are considered for computing node resource utilization.
|
||||||
|
|
||||||
There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
|
There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
|
||||||
from where pods could be evicted. Any node, between the thresholds, `thresholds` and `targetThresholds` is
|
from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, or number of pods),
|
||||||
|
the node is considered over utilized. Any node between the thresholds, `thresholds` and `targetThresholds` is
|
||||||
considered appropriately utilized and is not considered for eviction. The threshold, `targetThresholds`,
|
considered appropriately utilized and is not considered for eviction. The threshold, `targetThresholds`,
|
||||||
can be configured for cpu, memory, and number of pods too in terms of percentage.
|
can be configured for cpu, memory, and number of pods too in terms of percentage.
|
||||||
|
|
||||||
These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements.
|
These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements.
|
||||||
Here is an example of a policy for this strategy:
|
|
||||||
|
|
||||||
```
|
**Parameters:**
|
||||||
|
|
||||||
|
|Name|Type|
|
||||||
|
|---|---|
|
||||||
|
|`thresholds`|map(string:int)|
|
||||||
|
|`targetThresholds`|map(string:int)|
|
||||||
|
|`numberOfNodes`|int|
|
||||||
|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||||
|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
apiVersion: "descheduler/v1alpha1"
|
apiVersion: "descheduler/v1alpha1"
|
||||||
kind: "DeschedulerPolicy"
|
kind: "DeschedulerPolicy"
|
||||||
strategies:
|
strategies:
|
||||||
@@ -120,6 +206,15 @@ strategies:
|
|||||||
"pods": 50
|
"pods": 50
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Policy should pass the following validation checks:
|
||||||
|
* Only three types of resources are supported: `cpu`, `memory` and `pods`.
|
||||||
|
* `thresholds` or `targetThresholds` can not be nil and they must configure exactly the same types of resources.
|
||||||
|
* The valid range of the resource's percentage value is \[0, 100\]
|
||||||
|
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.
|
||||||
|
|
||||||
|
If any of the resource types is not specified, all its thresholds default to 100% to avoid nodes going
|
||||||
|
from underutilized to overutilized.
|
||||||
|
|
||||||
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
|
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
|
||||||
This parameter can be configured to activate the strategy only when the number of under utilized nodes
|
This parameter can be configured to activate the strategy only when the number of under utilized nodes
|
||||||
are above the configured value. This could be helpful in large clusters where a few nodes could go
|
are above the configured value. This could be helpful in large clusters where a few nodes could go
|
||||||
@@ -131,15 +226,24 @@ This strategy makes sure that pods violating interpod anti-affinity are removed
|
|||||||
if there is podA on a node and podB and podC (running on the same node) have anti-affinity rules which prohibit
|
if there is podA on a node and podB and podC (running on the same node) have anti-affinity rules which prohibit
|
||||||
them to run on the same node, then podA will be evicted from the node so that podB and podC could run. This
|
them to run on the same node, then podA will be evicted from the node so that podB and podC could run. This
|
||||||
issue could happen, when the anti-affinity rules for podB and podC are created when they are already running on
|
issue could happen, when the anti-affinity rules for podB and podC are created when they are already running on
|
||||||
node. Currently, there are no parameters associated with this strategy. To disable this strategy, the
|
node.
|
||||||
policy should look like:
|
|
||||||
|
|
||||||
```
|
**Parameters:**
|
||||||
|
|
||||||
|
|Name|Type|
|
||||||
|
|---|---|
|
||||||
|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||||
|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||||
|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
apiVersion: "descheduler/v1alpha1"
|
apiVersion: "descheduler/v1alpha1"
|
||||||
kind: "DeschedulerPolicy"
|
kind: "DeschedulerPolicy"
|
||||||
strategies:
|
strategies:
|
||||||
"RemovePodsViolatingInterPodAntiAffinity":
|
"RemovePodsViolatingInterPodAntiAffinity":
|
||||||
enabled: false
|
enabled: true
|
||||||
```
|
```
|
||||||
|
|
||||||
### RemovePodsViolatingNodeAffinity
|
### RemovePodsViolatingNodeAffinity
|
||||||
@@ -160,9 +264,18 @@ of scheduling. Over time nodeA stops to satisfy the rule. When the strategy gets
|
|||||||
executed and there is another node available that satisfies the node affinity rule,
|
executed and there is another node available that satisfies the node affinity rule,
|
||||||
podA gets evicted from nodeA.
|
podA gets evicted from nodeA.
|
||||||
|
|
||||||
The policy file should look like:
|
**Parameters:**
|
||||||
|
|
||||||
```
|
|Name|Type|
|
||||||
|
|---|---|
|
||||||
|
|`nodeAffinityType`|list(string)|
|
||||||
|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||||
|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||||
|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
apiVersion: "descheduler/v1alpha1"
|
apiVersion: "descheduler/v1alpha1"
|
||||||
kind: "DeschedulerPolicy"
|
kind: "DeschedulerPolicy"
|
||||||
strategies:
|
strategies:
|
||||||
@@ -178,9 +291,19 @@ strategies:
|
|||||||
This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example there is a
|
This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example there is a
|
||||||
pod "podA" with a toleration to tolerate a taint ``key=value:NoSchedule`` scheduled and running on the tainted
|
pod "podA" with a toleration to tolerate a taint ``key=value:NoSchedule`` scheduled and running on the tainted
|
||||||
node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations
|
node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations
|
||||||
and will be evicted. The policy file should look like:
|
and will be evicted.
|
||||||
|
|
||||||
````
|
**Parameters:**
|
||||||
|
|
||||||
|
|Name|Type|
|
||||||
|
|---|---|
|
||||||
|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||||
|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||||
|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
````yaml
|
||||||
apiVersion: "descheduler/v1alpha1"
|
apiVersion: "descheduler/v1alpha1"
|
||||||
kind: "DeschedulerPolicy"
|
kind: "DeschedulerPolicy"
|
||||||
strategies:
|
strategies:
|
||||||
@@ -188,11 +311,51 @@ strategies:
|
|||||||
enabled: true
|
enabled: true
|
||||||
````
|
````
|
||||||
|
|
||||||
|
### RemovePodsViolatingTopologySpreadConstraint
|
||||||
|
|
||||||
|
This strategy makes sure that pods violating [topology spread constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
|
||||||
|
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
|
||||||
|
This strategy requires k8s version 1.18 at a minimum.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
|
||||||
|
|Name|Type|
|
||||||
|
|---|---|
|
||||||
|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||||
|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||||
|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"RemovePodsViolatingTopologySpreadConstraint":
|
||||||
|
enabled: true
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
### RemovePodsHavingTooManyRestarts
|
### RemovePodsHavingTooManyRestarts
|
||||||
|
|
||||||
This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes.
|
This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that
|
||||||
|
can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters
|
||||||
|
include `podRestartThreshold`, which is the number of restarts at which a pod should be evicted, and `includingInitContainers`,
|
||||||
|
which determines whether init container restarts should be factored into that calculation.
|
||||||
|
|
||||||
```
|
**Parameters:**
|
||||||
|
|
||||||
|
|Name|Type|
|
||||||
|
|---|---|
|
||||||
|
|`podRestartThreshold`|int|
|
||||||
|
|`includingInitContainers`|bool|
|
||||||
|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||||
|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||||
|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
apiVersion: "descheduler/v1alpha1"
|
apiVersion: "descheduler/v1alpha1"
|
||||||
kind: "DeschedulerPolicy"
|
kind: "DeschedulerPolicy"
|
||||||
strategies:
|
strategies:
|
||||||
@@ -206,18 +369,124 @@ strategies:
|
|||||||
|
|
||||||
### PodLifeTime
|
### PodLifeTime
|
||||||
|
|
||||||
This strategy evicts pods that are older than `.strategies.PodLifeTime.params.maxPodLifeTimeSeconds` The policy
|
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
|
||||||
file should look like:
|
|
||||||
|
|
||||||
````
|
You can also specify `podStatusPhases` to `only` evict pods with specific `StatusPhases`, currently this parameter is limited
|
||||||
|
to `Running` and `Pending`.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
|
||||||
|
|Name|Type|
|
||||||
|
|---|---|
|
||||||
|
|`maxPodLifeTimeSeconds`|int|
|
||||||
|
|`podStatusPhases`|list(string)|
|
||||||
|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||||
|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||||
|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
apiVersion: "descheduler/v1alpha1"
|
apiVersion: "descheduler/v1alpha1"
|
||||||
kind: "DeschedulerPolicy"
|
kind: "DeschedulerPolicy"
|
||||||
strategies:
|
strategies:
|
||||||
"PodLifeTime":
|
"PodLifeTime":
|
||||||
enabled: true
|
enabled: true
|
||||||
params:
|
params:
|
||||||
maxPodLifeTimeSeconds: 86400
|
podLifeTime:
|
||||||
````
|
maxPodLifeTimeSeconds: 86400
|
||||||
|
podStatusPhases:
|
||||||
|
- "Pending"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Filter Pods
|
||||||
|
|
||||||
|
### Namespace filtering
|
||||||
|
|
||||||
|
The following strategies accept a `namespaces` parameter which allows to specify a list of including, resp. excluding namespaces:
|
||||||
|
* `PodLifeTime`
|
||||||
|
* `RemovePodsHavingTooManyRestarts`
|
||||||
|
* `RemovePodsViolatingNodeTaints`
|
||||||
|
* `RemovePodsViolatingNodeAffinity`
|
||||||
|
* `RemovePodsViolatingInterPodAntiAffinity`
|
||||||
|
* `RemoveDuplicates`
|
||||||
|
* `RemovePodsViolatingTopologySpreadConstraint`
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"PodLifeTime":
|
||||||
|
enabled: true
|
||||||
|
params:
|
||||||
|
podLifeTime:
|
||||||
|
maxPodLifeTimeSeconds: 86400
|
||||||
|
namespaces:
|
||||||
|
include:
|
||||||
|
- "namespace1"
|
||||||
|
- "namespace2"
|
||||||
|
```
|
||||||
|
|
||||||
|
In the examples `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
|
||||||
|
The similar holds for `exclude` field:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"PodLifeTime":
|
||||||
|
enabled: true
|
||||||
|
params:
|
||||||
|
podLifeTime:
|
||||||
|
maxPodLifeTimeSeconds: 86400
|
||||||
|
namespaces:
|
||||||
|
exclude:
|
||||||
|
- "namespace1"
|
||||||
|
- "namespace2"
|
||||||
|
```
|
||||||
|
|
||||||
|
The strategy gets executed over all namespaces but `namespace1` and `namespace2`.
|
||||||
|
|
||||||
|
It's not allowed to compute `include` with `exclude` field.
|
||||||
|
|
||||||
|
### Priority filtering
|
||||||
|
|
||||||
|
All strategies are able to configure a priority threshold, only pods under the threshold can be evicted. You can
|
||||||
|
specify this threshold by setting `thresholdPriorityClassName`(setting the threshold to the value of the given
|
||||||
|
priority class) or `thresholdPriority`(directly setting the threshold) parameters. By default, this threshold
|
||||||
|
is set to the value of `system-cluster-critical` priority class.
|
||||||
|
E.g.
|
||||||
|
|
||||||
|
Setting `thresholdPriority`
|
||||||
|
```yaml
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"PodLifeTime":
|
||||||
|
enabled: true
|
||||||
|
params:
|
||||||
|
podLifeTime:
|
||||||
|
maxPodLifeTimeSeconds: 86400
|
||||||
|
thresholdPriority: 10000
|
||||||
|
```
|
||||||
|
|
||||||
|
Setting `thresholdPriorityClassName`
|
||||||
|
```yaml
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"PodLifeTime":
|
||||||
|
enabled: true
|
||||||
|
params:
|
||||||
|
podLifeTime:
|
||||||
|
maxPodLifeTimeSeconds: 86400
|
||||||
|
thresholdPriorityClassName: "priorityclass1"
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`, if the given priority class
|
||||||
|
does not exist, descheduler won't create it and will throw an error.
|
||||||
|
|
||||||
## Pod Evictions
|
## Pod Evictions
|
||||||
|
|
||||||
@@ -227,21 +496,34 @@ When the descheduler decides to evict pods from a node, it employs the following
|
|||||||
* Pods (static or mirrored pods or stand alone pods) not part of an RC, RS, Deployment or Job are
|
* Pods (static or mirrored pods or stand alone pods) not part of an RC, RS, Deployment or Job are
|
||||||
never evicted because these pods won't be recreated.
|
never evicted because these pods won't be recreated.
|
||||||
* Pods associated with DaemonSets are never evicted.
|
* Pods associated with DaemonSets are never evicted.
|
||||||
* Pods with local storage are never evicted.
|
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set)
|
||||||
* Best efforts pods are evicted before burstable and guaranteed pods.
|
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
|
||||||
|
best effort pods are evicted before burstable and guaranteed pods.
|
||||||
* All types of pods with the annotation descheduler.alpha.kubernetes.io/evict are evicted. This
|
* All types of pods with the annotation descheduler.alpha.kubernetes.io/evict are evicted. This
|
||||||
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
|
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
|
||||||
Users should know how and if the pod will be recreated.
|
Users should know how and if the pod will be recreated.
|
||||||
|
|
||||||
|
Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.
|
||||||
|
|
||||||
### Pod Disruption Budget (PDB)
|
### Pod Disruption Budget (PDB)
|
||||||
|
|
||||||
Pods subject to a Pod Disruption Budget(PDB) are not evicted if descheduling violates its PDB. The pods
|
Pods subject to a Pod Disruption Budget(PDB) are not evicted if descheduling violates its PDB. The pods
|
||||||
are evicted by using the eviction subresource to handle PDB.
|
are evicted by using the eviction subresource to handle PDB.
|
||||||
|
|
||||||
## Compatibility Matrix
|
## Compatibility Matrix
|
||||||
|
The below compatibility matrix shows the k8s client package(client-go, apimachinery, etc) versions that descheduler
|
||||||
|
is compiled with. At this time descheduler does not have a hard dependency to a specific k8s release. However a
|
||||||
|
particular descheduler release is only tested against the three latest k8s minor versions. For example descheduler
|
||||||
|
v0.18 should work with k8s v1.18, v1.17, and v1.16.
|
||||||
|
|
||||||
Descheduler | supported Kubernetes version
|
Starting with descheduler release v0.18 the minor version of descheduler matches the minor version of the k8s client
|
||||||
|
packages that it is compiled with.
|
||||||
|
|
||||||
|
Descheduler | Supported Kubernetes Version
|
||||||
-------------|-----------------------------
|
-------------|-----------------------------
|
||||||
|
v0.20 | v1.20
|
||||||
|
v0.19 | v1.19
|
||||||
|
v0.18 | v1.18
|
||||||
v0.10 | v1.17
|
v0.10 | v1.17
|
||||||
v0.4-v0.9 | v1.9+
|
v0.4-v0.9 | v1.9+
|
||||||
v0.1-v0.3 | v1.7-v1.8
|
v0.1-v0.3 | v1.7-v1.8
|
||||||
|
|||||||
22
charts/descheduler/.helmignore
Normal file
22
charts/descheduler/.helmignore
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# Patterns to ignore when building packages.
|
||||||
|
# This supports shell glob matching, relative path matching, and
|
||||||
|
# negation (prefixed with !). Only one pattern per line.
|
||||||
|
.DS_Store
|
||||||
|
# Common VCS dirs
|
||||||
|
.git/
|
||||||
|
.gitignore
|
||||||
|
.bzr/
|
||||||
|
.bzrignore
|
||||||
|
.hg/
|
||||||
|
.hgignore
|
||||||
|
.svn/
|
||||||
|
# Common backup files
|
||||||
|
*.swp
|
||||||
|
*.bak
|
||||||
|
*.tmp
|
||||||
|
*~
|
||||||
|
# Various IDEs
|
||||||
|
.project
|
||||||
|
.idea/
|
||||||
|
*.tmproj
|
||||||
|
.vscode/
|
||||||
16
charts/descheduler/Chart.yaml
Normal file
16
charts/descheduler/Chart.yaml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
name: descheduler
|
||||||
|
version: 0.20.0
|
||||||
|
appVersion: 0.20.0
|
||||||
|
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
|
||||||
|
keywords:
|
||||||
|
- kubernetes
|
||||||
|
- descheduler
|
||||||
|
- kube-scheduler
|
||||||
|
home: https://github.com/kubernetes-sigs/descheduler
|
||||||
|
icon: https://kubernetes.io/images/favicon.png
|
||||||
|
sources:
|
||||||
|
- https://github.com/kubernetes-sigs/descheduler
|
||||||
|
maintainers:
|
||||||
|
- name: Kubernetes SIG Scheduling
|
||||||
|
email: kubernetes-sig-scheduling@googlegroups.com
|
||||||
65
charts/descheduler/README.md
Normal file
65
charts/descheduler/README.md
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
# Descheduler for Kubernetes
|
||||||
|
|
||||||
|
[Descheduler](https://github.com/kubernetes-sigs/descheduler/) for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
|
||||||
|
|
||||||
|
## TL;DR:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
|
||||||
|
helm install my-release --namespace kube-system descheduler/descheduler
|
||||||
|
```
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Kubernetes 1.14+
|
||||||
|
|
||||||
|
## Installing the Chart
|
||||||
|
|
||||||
|
To install the chart with the release name `my-release`:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
helm install --namespace kube-system my-release descheduler/descheduler
|
||||||
|
```
|
||||||
|
|
||||||
|
The command deploys _descheduler_ on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||||
|
|
||||||
|
> **Tip**: List all releases using `helm list`
|
||||||
|
|
||||||
|
## Uninstalling the Chart
|
||||||
|
|
||||||
|
To uninstall/delete the `my-release` deployment:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
helm delete my-release
|
||||||
|
```
|
||||||
|
|
||||||
|
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
The following table lists the configurable parameters of the _descheduler_ chart and their default values.
|
||||||
|
|
||||||
|
| Parameter | Description | Default |
|
||||||
|
| ------------------------------ | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
|
||||||
|
| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
|
||||||
|
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
|
||||||
|
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
|
||||||
|
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
|
||||||
|
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
|
||||||
|
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
|
||||||
|
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
|
||||||
|
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
|
||||||
|
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
|
||||||
|
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
|
||||||
|
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
|
||||||
|
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
|
||||||
|
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
|
||||||
|
| `podSecurityPolicy.create` | If `true`, create PodSecurityPolicy | `true` |
|
||||||
|
| `resources.cpuRequest` | Descheduler container CPU request | `500m` |
|
||||||
|
| `resources.memoryRequest` | Descheduler container memory request | `256Mi` |
|
||||||
|
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
|
||||||
|
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
|
||||||
1
charts/descheduler/templates/NOTES.txt
Normal file
1
charts/descheduler/templates/NOTES.txt
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Descheduler installed as a cron job.
|
||||||
56
charts/descheduler/templates/_helpers.tpl
Normal file
56
charts/descheduler/templates/_helpers.tpl
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
{{/* vim: set filetype=mustache: */}}
|
||||||
|
{{/*
|
||||||
|
Expand the name of the chart.
|
||||||
|
*/}}
|
||||||
|
{{- define "descheduler.name" -}}
|
||||||
|
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Create a default fully qualified app name.
|
||||||
|
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||||
|
If release name contains chart name it will be used as a full name.
|
||||||
|
*/}}
|
||||||
|
{{- define "descheduler.fullname" -}}
|
||||||
|
{{- if .Values.fullnameOverride -}}
|
||||||
|
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||||
|
{{- else -}}
|
||||||
|
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||||
|
{{- if contains $name .Release.Name -}}
|
||||||
|
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||||
|
{{- else -}}
|
||||||
|
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||||
|
{{- end -}}
|
||||||
|
{{- end -}}
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Create chart name and version as used by the chart label.
|
||||||
|
*/}}
|
||||||
|
{{- define "descheduler.chart" -}}
|
||||||
|
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Common labels
|
||||||
|
*/}}
|
||||||
|
{{- define "descheduler.labels" -}}
|
||||||
|
app.kubernetes.io/name: {{ include "descheduler.name" . }}
|
||||||
|
helm.sh/chart: {{ include "descheduler.chart" . }}
|
||||||
|
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||||
|
{{- if .Chart.AppVersion }}
|
||||||
|
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||||
|
{{- end }}
|
||||||
|
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Create the name of the service account to use
|
||||||
|
*/}}
|
||||||
|
{{- define "descheduler.serviceAccountName" -}}
|
||||||
|
{{- if .Values.serviceAccount.create -}}
|
||||||
|
{{ default (include "descheduler.fullname" .) .Values.serviceAccount.name }}
|
||||||
|
{{- else -}}
|
||||||
|
{{ default "default" .Values.serviceAccount.name }}
|
||||||
|
{{- end -}}
|
||||||
|
{{- end -}}
|
||||||
34
charts/descheduler/templates/clusterrole.yaml
Normal file
34
charts/descheduler/templates/clusterrole.yaml
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
{{- if .Values.rbac.create -}}
|
||||||
|
kind: ClusterRole
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
metadata:
|
||||||
|
name: {{ template "descheduler.fullname" . }}
|
||||||
|
labels:
|
||||||
|
{{- include "descheduler.labels" . | nindent 4 }}
|
||||||
|
rules:
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["events"]
|
||||||
|
verbs: ["create", "update"]
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["nodes"]
|
||||||
|
verbs: ["get", "watch", "list"]
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["namespaces"]
|
||||||
|
verbs: ["get", "list"]
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["pods"]
|
||||||
|
verbs: ["get", "watch", "list", "delete"]
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["pods/eviction"]
|
||||||
|
verbs: ["create"]
|
||||||
|
- apiGroups: ["scheduling.k8s.io"]
|
||||||
|
resources: ["priorityclasses"]
|
||||||
|
verbs: ["get", "watch", "list"]
|
||||||
|
{{- if .Values.podSecurityPolicy.create }}
|
||||||
|
- apiGroups: ['policy']
|
||||||
|
resources: ['podsecuritypolicies']
|
||||||
|
verbs: ['use']
|
||||||
|
resourceNames:
|
||||||
|
- {{ template "descheduler.fullname" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end -}}
|
||||||
16
charts/descheduler/templates/clusterrolebinding.yaml
Normal file
16
charts/descheduler/templates/clusterrolebinding.yaml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
{{- if .Values.rbac.create -}}
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRoleBinding
|
||||||
|
metadata:
|
||||||
|
name: {{ template "descheduler.fullname" . }}
|
||||||
|
labels:
|
||||||
|
{{- include "descheduler.labels" . | nindent 4 }}
|
||||||
|
roleRef:
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
kind: ClusterRole
|
||||||
|
name: {{ template "descheduler.fullname" . }}
|
||||||
|
subjects:
|
||||||
|
- kind: ServiceAccount
|
||||||
|
name: {{ template "descheduler.serviceAccountName" . }}
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
{{- end -}}
|
||||||
11
charts/descheduler/templates/configmap.yaml
Normal file
11
charts/descheduler/templates/configmap.yaml
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: {{ template "descheduler.fullname" . }}
|
||||||
|
labels:
|
||||||
|
{{- include "descheduler.labels" . | nindent 4 }}
|
||||||
|
data:
|
||||||
|
policy.yaml: |
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
|
||||||
charts/descheduler/templates/cronjob.yaml (new file, 64 lines)
@@ -0,0 +1,64 @@
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: {{ template "descheduler.fullname" . }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
spec:
  schedule: {{ .Values.schedule | quote }}
  concurrencyPolicy: "Forbid"
  {{- if .Values.startingDeadlineSeconds }}
  startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
  {{- end }}
  {{- if .Values.successfulJobsHistoryLimit }}
  successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
  {{- end }}
  {{- if .Values.failedJobsHistoryLimit }}
  failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
  {{- end }}
  jobTemplate:
    spec:
      template:
        metadata:
          name: {{ template "descheduler.fullname" . }}
          annotations:
            checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
            {{- if .Values.podAnnotations }}
            {{- .Values.podAnnotations | toYaml | nindent 12 }}
            {{- end }}
          labels:
            app.kubernetes.io/name: {{ include "descheduler.name" . }}
            app.kubernetes.io/instance: {{ .Release.Name }}
            {{- if .Values.podLabels }}
            {{- .Values.podLabels | toYaml | nindent 12 }}
            {{- end }}
        spec:
          {{- if .Values.priorityClassName }}
          priorityClassName: {{ .Values.priorityClassName }}
          {{- end }}
          serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
          restartPolicy: "Never"
          containers:
            - name: {{ .Chart.Name }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              command:
                - "/bin/descheduler"
              args:
                - "--policy-config-file"
                - "/policy-dir/policy.yaml"
                {{- range $key, $value := .Values.cmdOptions }}
                - {{ printf "--%s" $key | quote }}
                {{- if $value }}
                - {{ $value | quote }}
                {{- end }}
                {{- end }}
              resources:
                {{- toYaml .Values.resources | nindent 16 }}
              volumeMounts:
                - mountPath: /policy-dir
                  name: policy-volume
          volumes:
            - name: policy-volume
              configMap:
                name: {{ template "descheduler.fullname" . }}
charts/descheduler/templates/podsecuritypolicy.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
{{- if .Values.podSecurityPolicy.create -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ template "descheduler.fullname" . }}
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
  privileged: false
  allowPrivilegeEscalation: false
  requiredDropCapabilities:
    - ALL
  volumes:
    - 'configMap'
    - 'secret'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  readOnlyRootFilesystem: true
{{- end -}}
charts/descheduler/templates/serviceaccount.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "descheduler.serviceAccountName" . }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
{{- end -}}
charts/descheduler/values.yaml (new file, 74 lines)
@@ -0,0 +1,74 @@
# Default values for descheduler.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: k8s.gcr.io/descheduler/descheduler
  # Overrides the image tag whose default is the chart version
  tag: ""
  pullPolicy: IfNotPresent

resources:
  requests:
    cpu: 500m
    memory: 256Mi
  # limits:
  #   cpu: 100m
  #   memory: 128Mi

nameOverride: ""
fullnameOverride: ""

schedule: "*/2 * * * *"
#startingDeadlineSeconds: 200
#successfulJobsHistoryLimit: 1
#failedJobsHistoryLimit: 1

cmdOptions:
  v: 3
  # evict-local-storage-pods:
  # max-pods-to-evict-per-node: 10
  # node-selector: "key1=value1,key2=value2"

deschedulerPolicy:
  strategies:
    RemoveDuplicates:
      enabled: true
    RemovePodsViolatingNodeTaints:
      enabled: true
    RemovePodsViolatingNodeAffinity:
      enabled: true
      params:
        nodeAffinityType:
          - requiredDuringSchedulingIgnoredDuringExecution
    RemovePodsViolatingInterPodAntiAffinity:
      enabled: true
    LowNodeUtilization:
      enabled: true
      params:
        nodeResourceUtilizationThresholds:
          thresholds:
            cpu: 20
            memory: 20
            pods: 20
          targetThresholds:
            cpu: 50
            memory: 50
            pods: 50

priorityClassName: system-cluster-critical

rbac:
  # Specifies whether RBAC resources should be created
  create: true

podSecurityPolicy:
  # Specifies whether PodSecurityPolicy should be created.
  create: true

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
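As a rough illustration of how `cmdOptions` feeds the CronJob template's `range` loop above, the default `v: 3` would produce container args along these lines (a sketch, not generated chart output):
```
args:
  - "--policy-config-file"
  - "/policy-dir/policy.yaml"
  - "--v"
  - "3"
```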
@@ -14,7 +14,7 @@ steps:
       - VERSION=$_GIT_TAG
       - BASE_REF=$_PULL_BASE_REF
     args:
-      - push
+      - push-all
 substitutions:
   # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
   # can be used as a substitution
@@ -18,44 +18,63 @@ limitations under the License.
 package options

 import (
+    "github.com/spf13/pflag"
+    utilerrors "k8s.io/apimachinery/pkg/util/errors"
     clientset "k8s.io/client-go/kubernetes"

-    // install the componentconfig api so we get its defaulting and conversion functions
+    "k8s.io/component-base/logs"
     "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
     "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
     deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"

-    "github.com/spf13/pflag"
 )

 // DeschedulerServer configuration
 type DeschedulerServer struct {
     componentconfig.DeschedulerConfiguration
     Client clientset.Interface
+    Logs *logs.Options
 }

 // NewDeschedulerServer creates a new DeschedulerServer with default parameters
-func NewDeschedulerServer() *DeschedulerServer {
-    versioned := v1alpha1.DeschedulerConfiguration{}
-    deschedulerscheme.Scheme.Default(&versioned)
-    cfg := componentconfig.DeschedulerConfiguration{}
-    deschedulerscheme.Scheme.Convert(versioned, &cfg, nil)
-    s := DeschedulerServer{
-        DeschedulerConfiguration: cfg,
+func NewDeschedulerServer() (*DeschedulerServer, error) {
+    cfg, err := newDefaultComponentConfig()
+    if err != nil {
+        return nil, err
     }
-    return &s
+    return &DeschedulerServer{
+        DeschedulerConfiguration: *cfg,
+        Logs: logs.NewOptions(),
+    }, nil
+}
+
+// Validation checks for DeschedulerServer.
+func (s *DeschedulerServer) Validate() error {
+    var errs []error
+    errs = append(errs, s.Logs.Validate()...)
+    return utilerrors.NewAggregate(errs)
+}
+
+func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
+    versionedCfg := v1alpha1.DeschedulerConfiguration{}
+    deschedulerscheme.Scheme.Default(&versionedCfg)
+    cfg := componentconfig.DeschedulerConfiguration{}
+    if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
+        return nil, err
+    }
+    return &cfg, nil
 }

 // AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
 func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
+    fs.StringVar(&rs.Logging.Format, "logging-format", rs.Logging.Format, `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
     fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
     fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
     fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
     fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
     // node-selector query causes descheduler to run only on nodes that matches the node labels in the query
-    fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
+    fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
     // max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
-    fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
+    fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler")
     // evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
-    fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "Enables evicting pods using local storage by descheduler")
+    fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "DEPRECATED: enables evicting pods using local storage by descheduler")
 }
@@ -27,28 +27,35 @@
     "github.com/spf13/cobra"

     aflag "k8s.io/component-base/cli/flag"
-    "k8s.io/component-base/logs"
-    "k8s.io/klog"
+    "k8s.io/klog/v2"
 )

 // NewDeschedulerCommand creates a *cobra.Command object with default parameters
 func NewDeschedulerCommand(out io.Writer) *cobra.Command {
-    s := options.NewDeschedulerServer()
+    s, err := options.NewDeschedulerServer()
+
+    if err != nil {
+        klog.ErrorS(err, "unable to initialize server")
+    }
+
     cmd := &cobra.Command{
         Use: "descheduler",
         Short: "descheduler",
         Long: `The descheduler evicts pods which may be bound to less desired nodes`,
         Run: func(cmd *cobra.Command, args []string) {
-            logs.InitLogs()
-            defer logs.FlushLogs()
+            s.Logs.LogFormat = s.Logging.Format
+            s.Logs.Apply()
+
+            if err := s.Validate(); err != nil {
+                klog.ErrorS(err, "failed to validate server configuration")
+            }
             err := Run(s)
             if err != nil {
-                klog.Errorf("%v", err)
+                klog.ErrorS(err, "descheduler server")
             }
         },
     }
-    cmd.SetOutput(out)
+    cmd.SetOut(out)

     flags := cmd.Flags()
     flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
     flags.AddGoFlagSet(flag.CommandLine)
@@ -18,6 +18,7 @@ package app

 import (
     "fmt"
+    "regexp"
     "runtime"
     "strings"

@@ -25,9 +26,6 @@ import (
 )

 var (
-    // gitCommit is a constant representing the source version that
-    // generated this build. It should be set during build via -ldflags.
-    gitCommit string
     // version is a constant representing the version tag that
     // generated this build. It should be set during build via -ldflags.
     version string
@@ -40,7 +38,6 @@ var (
 type Info struct {
     Major string `json:"major"`
     Minor string `json:"minor"`
-    GitCommit string `json:"gitCommit"`
     GitVersion string `json:"gitVersion"`
     BuildDate string `json:"buildDate"`
     GoVersion string `json:"goVersion"`
@@ -55,7 +52,6 @@ func Get() Info {
     return Info{
         Major: majorVersion,
         Minor: minorVersion,
-        GitCommit: gitCommit,
         GitVersion: version,
         BuildDate: buildDate,
         GoVersion: runtime.Version(),
@@ -81,7 +77,18 @@ func splitVersion(version string) (string, string) {
     if version == "" {
         return "", ""
     }
-    // A sample version would be of form v0.1.0-7-ge884046, so split at first '.' and
-    // then return 0 and 1+(+ appended to follow semver convention) for major and minor versions.
-    return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
+    // Version from an automated container build environment for a tag. For example v20200521-v0.18.0.
+    m1, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+$`, version)
+
+    // Version from an automated container build environment(not a tag) or a local build. For example v20201009-v0.18.0-46-g939c1c0.
+    m2, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+-\w+-\w+$`, version)
+
+    if m1 || m2 {
+        semVer := strings.Split(version, "-")[1]
+        return strings.Trim(strings.Split(semVer, ".")[0], "v"), strings.Split(semVer, ".")[1] + "+"
+    }
+
+    // Something went wrong
+    return "", ""
 }
@@ -17,10 +17,9 @@ limitations under the License.
 package main

 import (
-    "flag"
     "fmt"
+    "k8s.io/component-base/logs"
     "os"

     "sigs.k8s.io/descheduler/cmd/descheduler/app"
 )

@@ -28,7 +27,10 @@ func main() {
     out := os.Stdout
     cmd := app.NewDeschedulerCommand(out)
     cmd.AddCommand(app.NewVersionCommand())
-    flag.CommandLine.Parse([]string{})
+
+    logs.InitLogs()
+    defer logs.FlushLogs()

     if err := cmd.Execute(); err != nil {
         fmt.Println(err)
         os.Exit(1)
@@ -3,10 +3,10 @@
 ## Required Tools

 - [Git](https://git-scm.com/downloads)
-- [Go 1.13+](https://golang.org/dl/)
+- [Go 1.15+](https://golang.org/dl/)
 - [Docker](https://docs.docker.com/install/)
 - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
-- [kind](https://kind.sigs.k8s.io/)
+- [kind v0.9.0+](https://kind.sigs.k8s.io/)

 ## Build and Run

@@ -34,9 +34,10 @@ GOOS=linux make dev-image
 kind create cluster --config hack/kind_config.yaml
 kind load docker-image <image name>
 kind get kubeconfig > /tmp/admin.conf
+export KUBECONFIG=/tmp/admin.conf
 make test-unit
 make test-e2e
 ```

 ### Miscellaneous
 See the [hack directory](https://github.com/kubernetes-sigs/descheduler/tree/master/hack) for additional tools and scripts used for developing the descheduler.
@@ -1,32 +1,42 @@
 # Release Guide

-## Semi-automatic
+## Container Image

+### Semi-automatic
+
 1. Make sure your repo is clean by git's standards
 2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
 3. Push the release branch to the descheuler repo and ensure branch protection is enabled (not required for patch releases)
-4. Tag the repository and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
+4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
 5. Publish a draft release using the tag you just created
 6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
 7. Publish release
 8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release

-## Manual
+### Manual

 1. Make sure your repo is clean by git's standards
 2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
 3. Push the release branch to the descheuler repo and ensure branch protection is enabled (not required for patch releases)
-4. Tag the repository and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
+4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
 5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
-6. Build and push the container image to the staging registry `VERSION=$VERSION make push`
+6. Build and push the container image to the staging registry `VERSION=$VERSION make push-all`
 7. Publish a draft release using the tag you just created
 8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
 9. Publish release
 10. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release

-## Notes
+### Notes
+It's important to create the tag on the master branch after creating the release-* branch so that the [Helm releaser-action](#helm-chart) can work.
+It compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
+will be nothing to compare and it will fail (creating a new release branch usually involves no code changes). For this same reason, you should
+also tag patch releases (on the release-* branch) *after* pushing changes (if those changes involve bumping the Helm chart version).
+
 See [post-descheduler-push-images dashboard](https://testgrid.k8s.io/sig-scheduling#post-descheduler-push-images) for staging registry image build job status.
+
+View the descheduler staging registry using [this URL](https://console.cloud.google.com/gcr/images/k8s-staging-descheduler/GLOBAL/descheduler) in a web browser
+or use the below `gcloud` commands.
+
 List images in staging registry.
 ```
 gcloud container images list --repository gcr.io/k8s-staging-descheduler
@@ -46,3 +56,19 @@ Pull image from the staging registry.
 ```
 docker pull gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
 ```
+
+## Helm Chart
+Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. Example git tag name is `descheduler-helm-chart-0.18.0`.
+Released versions of the helm charts are stored in the `gh-pages` branch of this repo. The [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action)
+is setup to build and push the helm charts to the `gh-pages` branch when changes are pushed to a `release-*` branch.
+
+The major and minor version of the chart matches the descheduler major and minor versions. For example descheduler helm chart version helm-descheduler-chart-0.18.0 corresponds
+to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patcher version of the descheduler will not necessarily match. The patch
+version of the descheduler helm chart is used to version changes specific to the helm chart.
+
+1. Merge all helm chart changes into the master branch before the release is tagged/cut
+1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version(no `v` prefix)
+2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
+2. Make sure your repo is clean by git's standards
+3. Follow the release-branch or patch release tagging pattern from the above section.
+4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
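To illustrate the chart versioning rule added above, a `charts/descheduler/Chart.yaml` for descheduler v0.18.0 might look roughly like this; the field values are examples only, not copied from the repo:
```
apiVersion: v1
name: descheduler
version: 0.18.0      # chart version, bumped independently for chart-only changes
appVersion: 0.18.0   # descheduler version, without the "v" prefix
```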
@@ -1,9 +1,17 @@
 # User Guide

-Starting with descheduler release v0.10.0 container images are available in these container registries.
-* `asia.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
-* `eu.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
-* `us.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
+Starting with descheduler release v0.10.0 container images are available in the official k8s container registry.
+* `k8s.gcr.io/descheduler/descheduler`
+
+Also, starting with descheduler release v0.20.0 multi-arch container images are provided. Currently AMD64 and ARM64
+container images are provided. Multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from
+a registry. Therefore starting with descheduler release v0.20.0 use the below process to download the official descheduler
+image into a kind cluster.
+```
+kind create cluster
+docker pull k8s.gcr.io/descheduler/descheduler:v0.20.0
+kind load docker-image k8s.gcr.io/descheduler/descheduler:v0.20.0
+```
+
 ## Policy Configuration Examples
 The [examples](https://github.com/kubernetes-sigs/descheduler/tree/master/examples) directory has descheduler policy configuration examples.
@@ -23,11 +31,11 @@
   version     Version of descheduler

 Flags:
-      --add-dir-header If true, adds the file directory to the header
+      --add-dir-header If true, adds the file directory to the header of the log messages
       --alsologtostderr log to standard error as well as files
       --descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
       --dry-run execute descheduler in dry run mode.
-      --evict-local-storage-pods Enables evicting pods using local storage by descheduler
+      --evict-local-storage-pods DEPRECATED: enables evicting pods using local storage by descheduler
   -h, --help help for descheduler
       --kubeconfig string File with kube configuration.
       --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0)
@@ -36,8 +44,8 @@ Flags:
       --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
       --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
       --logtostderr log to standard error instead of files (default true)
-      --max-pods-to-evict-per-node int Limits the maximum number of pods to be evicted per node by descheduler
+      --max-pods-to-evict-per-node int DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler
-      --node-selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
+      --node-selector string DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
       --policy-config-file string File with descheduler policy configuration.
       --skip-headers If true, avoid header prefixes in the log messages
       --skip-log-headers If true, avoid headers when opening log files
@@ -71,20 +79,40 @@ This policy configuration file ensures that pods created more than 7 days ago ar
 apiVersion: "descheduler/v1alpha1"
 kind: "DeschedulerPolicy"
 strategies:
-  "LowNodeUtilization":
-    enabled: false
-  "RemoveDuplicates":
-    enabled: false
-  "RemovePodsViolatingInterPodAntiAffinity":
-    enabled: false
-  "RemovePodsViolatingNodeAffinity":
-    enabled: false
-  "RemovePodsViolatingNodeTaints":
-    enabled: false
-  "RemovePodsHavingTooManyRestarts":
-    enabled: false
   "PodLifeTime":
     enabled: true
     params:
       maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
 ```
+
+### Balance Cluster By Node Memory Utilization
+
+If your cluster has been running for a long period of time, you may find that the resource utilization is not very
+balanced. The `LowNodeUtilization` strategy can be used to rebalance your cluster based on `cpu`, `memory`
+or `number of pods`.
+
+Using the following policy configuration file, descheduler will rebalance the cluster based on memory by evicting pods
+from nodes with memory utilization over 70% to nodes with memory utilization below 20%.
+
+```
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "LowNodeUtilization":
+    enabled: true
+    params:
+      nodeResourceUtilizationThresholds:
+        thresholds:
+          "memory": 20
+        targetThresholds:
+          "memory": 70
+```
+
+### Autoheal Node Problems
+Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
+[Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
+[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove
+Nodes which have problems. Node Problem Detector can detect specific Node problems and taint any Nodes which have those
+problems. The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
+allocation falls below the Cluster Autoscaler's scale down threshold, the Node will become a scale down candidate
+and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems.
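A minimal policy sketch for the autohealing setup described above simply enables the `RemovePodsViolatingNodeTaints` strategy; pairing it with Node Problem Detector and Cluster Autoscaler is cluster configuration outside this policy file:
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
```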
examples/low-node-utilization.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "memory": 20
        targetThresholds:
          "memory": 70
@@ -2,19 +2,8 @@
 apiVersion: "descheduler/v1alpha1"
 kind: "DeschedulerPolicy"
 strategies:
-  "LowNodeUtilization":
-    enabled: false
-  "RemoveDuplicates":
-    enabled: false
-  "RemovePodsViolatingInterPodAntiAffinity":
-    enabled: false
-  "RemovePodsViolatingNodeAffinity":
-    enabled: false
-  "RemovePodsViolatingNodeTaints":
-    enabled: false
-  "RemovePodsHavingTooManyRestarts":
-    enabled: false
   "PodLifeTime":
     enabled: true
     params:
-      maxPodLifeTimeSeconds: 604800 # 7 days
+      podLifeTime:
+        maxPodLifeTimeSeconds: 604800 # 7 days
@@ -21,5 +21,5 @@ strategies:
     enabled: true
     params:
       podsHavingTooManyRestarts:
-        podRestartThresholds: 100
+        podRestartThreshold: 100
         includingInitContainers: true
go.mod (17 changed lines)
@@ -1,14 +1,17 @@
 module sigs.k8s.io/descheduler

-go 1.13
+go 1.15

 require (
+    github.com/client9/misspell v0.3.4
     github.com/spf13/cobra v0.0.5
     github.com/spf13/pflag v1.0.5
-    k8s.io/api v0.18.2
-    k8s.io/apimachinery v0.18.2
-    k8s.io/apiserver v0.18.2
-    k8s.io/client-go v0.18.2
-    k8s.io/component-base v0.18.2
-    k8s.io/klog v1.0.0
+    k8s.io/api v0.20.0
+    k8s.io/apimachinery v0.20.0
+    k8s.io/apiserver v0.20.0
+    k8s.io/client-go v0.20.0
+    k8s.io/code-generator v0.20.0
+    k8s.io/component-base v0.20.0
+    k8s.io/component-helpers v0.20.0
+    k8s.io/klog/v2 v2.4.0
 )

go.sum (420 changed lines)
@@ -2,34 +2,68 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
|
|||||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
|
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
|
||||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||||
|
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||||
|
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||||
|
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||||
|
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||||
|
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||||
|
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||||
|
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||||
|
cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0=
|
||||||
|
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||||
|
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||||
|
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||||
|
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||||
|
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||||
|
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||||
|
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||||
|
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||||
|
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||||
|
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||||
|
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||||
|
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||||
|
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||||
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
|
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||||
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
|
github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ=
|
||||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
|
||||||
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
|
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
|
||||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
|
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
|
||||||
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
|
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
|
||||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
|
||||||
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
|
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
|
||||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
|
||||||
|
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
|
||||||
|
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
|
||||||
|
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
|
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
|
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||||
|
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||||
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||||
|
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||||
|
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||||
|
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
@@ -40,7 +74,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
|
|||||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||||
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
@@ -48,74 +82,116 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
|||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||||
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
|
||||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||||
|
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||||
|
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
|
||||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
|
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
|
||||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||||
|
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
@@ -126,6 +202,7 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
@@ -133,32 +210,38 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -181,22 +264,30 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
@@ -218,30 +309,71 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -251,74 +383,195 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
|
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||||
|
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||||
|
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||||
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||||
|
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||||
|
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
|
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
|
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
|
||||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||||
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
|
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||||
|
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||||
|
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||||
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||||
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||||
@@ -331,36 +584,53 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
|
|||||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||||
|
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
|
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA=
|
k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI=
|
||||||
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
|
k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg=
|
||||||
k8s.io/apiserver v0.18.2 h1:fwKxdTWwwYhxvtjo0UUfX+/fsitsNtfErPNegH2x9ic=
|
k8s.io/apimachinery v0.20.0 h1:jjzbTJRXk0unNS71L7h3lxGDH/2HPxMPaQY+MjECKL8=
|
||||||
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
|
k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||||
k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE=
|
k8s.io/apiserver v0.20.0 h1:0MwO4xCoqZwhoLbFyyBSJdu55CScp4V4sAgX6z4oPBY=
|
||||||
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
|
k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8=
|
||||||
k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y=
|
k8s.io/client-go v0.20.0 h1:Xlax8PKbZsjX4gFvNtt4F5MoJ1V5prDvCuoq9B7iax0=
|
||||||
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
|
k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY=
|
||||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
k8s.io/code-generator v0.20.0 h1:c8JaABvEEZPDE8MICTOtveHX2axchl+EptM+o4OGvbg=
|
||||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
|
||||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
k8s.io/component-base v0.20.0 h1:BXGL8iitIQD+0NgW49UsM7MraNUUGDU3FBmrfUAtmVQ=
|
||||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA=
|
||||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
k8s.io/component-helpers v0.20.0 h1:7Zi1fcb5nV0h03d9eeZGk71+ZWYvAN4Be+xMOZyFerc=
|
||||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
|
k8s.io/component-helpers v0.20.0/go.mod h1:nx6NOtfSfGOxnSZsDJxpGbnsVuUA1UXpwDvZIrtigNk=
|
||||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
|
k8s.io/gengo v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE=
|
||||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
|
k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
|
||||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
|
k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
|
||||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
|
||||||
|
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||||
|
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
|
||||||
|
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||||
|
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||||
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||||
|
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
|
||||||
|
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||||
hack/.spelling_failures (new Normal file, 6 lines)
@@ -0,0 +1,6 @@
+BUILD
+CHANGELOG
+OWNERS
+go.mod
+go.sum
+vendor/
@@ -1,5 +1,5 @@
 kind: Cluster
-apiVersion: kind.sigs.k8s.io/v1alpha3
+apiVersion: kind.x-k8s.io/v1alpha4
 nodes:
 - role: control-plane
 - role: worker
hack/tools.go (new Normal file, 25 lines)
@@ -0,0 +1,25 @@
+// +build tools
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This package imports things required by build scripts, to force `go mod` to see them as dependencies
+package tools
+
+import (
+	_ "github.com/client9/misspell/cmd/misspell"
+	_ "k8s.io/code-generator"
+)
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
 
 GO_VERSION=($(go version))
 
-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.13|go1.14|go1.15') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi

@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
 
 GO_VERSION=($(go version))
 
-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.13|go1.14|go1.15') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi
hack/verify-spelling.sh (new Executable file, 41 lines)
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script checks commonly misspelled English words in all files in the
+# working directory by client9/misspell package.
+# Usage: `hack/verify-spelling.sh`.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+export KUBE_ROOT
+source "${KUBE_ROOT}/hack/lib/init.sh"
+
+# Ensure that we find the binaries we build before anything else.
+export GOBIN="${OS_OUTPUT_BINPATH}"
+PATH="${GOBIN}:${PATH}"
+
+# Install tools we need
+pushd "${KUBE_ROOT}" >/dev/null
+GO111MODULE=on go install github.com/client9/misspell/cmd/misspell
+popd >/dev/null
+
+# Spell checking
+# All the skipping files are defined in hack/.spelling_failures
+skipping_file="${KUBE_ROOT}/hack/.spelling_failures"
+failing_packages=$(sed "s| | -e |g" "${skipping_file}")
+git ls-files | grep -v -e "${failing_packages}" | xargs misspell -i "Creater,creater,ect" -error -o stderr
kubernetes/base/kustomization.yaml (new Normal file, 6 lines)
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- configmap.yaml
+- rbac.yaml
@@ -3,7 +3,6 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: descheduler-cluster-role
-  namespace: kube-system
 rules:
 - apiGroups: [""]
   resources: ["events"]
@@ -11,12 +10,18 @@ rules:
 - apiGroups: [""]
   resources: ["nodes"]
   verbs: ["get", "watch", "list"]
+- apiGroups: [""]
+  resources: ["namespaces"]
+  verbs: ["get", "list"]
 - apiGroups: [""]
   resources: ["pods"]
   verbs: ["get", "watch", "list", "delete"]
 - apiGroups: [""]
   resources: ["pods/eviction"]
   verbs: ["create"]
+- apiGroups: ["scheduling.k8s.io"]
+  resources: ["priorityclasses"]
+  verbs: ["get", "watch", "list"]
 ---
 apiVersion: v1
 kind: ServiceAccount
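The added rules give the descheduler read access to namespaces and priorityclasses. A minimal sketch of how the new permission could be verified with client-go is shown below; the kubeconfig path, the kube-system namespace, and the descheduler-sa service account name are taken from the manifests in this change, everything else is illustrative.

// Sketch: asks the API server whether the descheduler service account may
// list priorityclasses, exercising the new ClusterRole rule above.
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	sar := &authv1.SubjectAccessReview{
		Spec: authv1.SubjectAccessReviewSpec{
			User: "system:serviceaccount:kube-system:descheduler-sa",
			ResourceAttributes: &authv1.ResourceAttributes{
				Group:    "scheduling.k8s.io",
				Resource: "priorityclasses",
				Verb:     "list",
			},
		},
	}
	res, err := client.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("allowed=%v reason=%q\n", res.Status.Allowed, res.Status.Reason)
}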
@@ -16,7 +16,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
       - name: descheduler
-        image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.10.0
+        image: k8s.gcr.io/descheduler/descheduler:v0.20.0
         volumeMounts:
         - mountPath: /policy-dir
           name: policy-volume
@@ -27,6 +27,10 @@ spec:
         - "/policy-dir/policy.yaml"
        - "--v"
         - "3"
+        resources:
+          requests:
+            cpu: "500m"
+            memory: "256Mi"
       restartPolicy: "Never"
      serviceAccountName: descheduler-sa
       volumes:
kubernetes/cronjob/kustomization.yaml (new Normal file, 6 lines)
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../base
+- cronjob.yaml
@@ -14,7 +14,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
       - name: descheduler
-        image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.10.0
+        image: k8s.gcr.io/descheduler/descheduler:v0.20.0
         volumeMounts:
         - mountPath: /policy-dir
           name: policy-volume
@@ -25,6 +25,10 @@ spec:
         - "/policy-dir/policy.yaml"
         - "--v"
         - "3"
+        resources:
+          requests:
+            cpu: "500m"
+            memory: "256Mi"
       restartPolicy: "Never"
       serviceAccountName: descheduler-sa
       volumes:
kubernetes/job/kustomization.yaml (new Normal file, 6 lines)
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../base
+- job.yaml
@@ -19,8 +19,6 @@ package api
 import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-
-	"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
 )
 
 var (
@@ -35,12 +33,6 @@ const GroupName = "descheduler"
 // SchemeGroupVersion is group version used to register these objects
 var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
 
-func init() {
-	if err := addKnownTypes(scheme.Scheme); err != nil {
-		panic(err)
-	}
-}
-
 // Kind takes an unqualified kind and returns a Group qualified GroupKind
 func Kind(kind string) schema.GroupKind {
 	return SchemeGroupVersion.WithKind(kind).GroupKind()
@@ -28,6 +28,15 @@ type DeschedulerPolicy struct {
 
 	// Strategies
 	Strategies StrategyList
+
+	// NodeSelector for a set of nodes to operate over
+	NodeSelector *string
+
+	// EvictLocalStoragePods allows pods using local storage to be evicted.
+	EvictLocalStoragePods *bool
+
+	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
+	MaxNoOfPodsToEvictPerNode *int
 }
 
 type StrategyName string
@@ -41,16 +50,28 @@ type DeschedulerStrategy struct {
 	Weight int
 
 	// Strategy parameters
-	Params StrategyParameters
+	Params *StrategyParameters
 }
 
-// Only one of its members may be specified
+// Namespaces carries a list of included/excluded namespaces
+// for which a given strategy is applicable
+type Namespaces struct {
+	Include []string
+	Exclude []string
+}
+
+// Besides Namespaces only one of its members may be specified
+// TODO(jchaloup): move Namespaces ThresholdPriority and ThresholdPriorityClassName to individual strategies
+// once the policy version is bumped to v1alpha2
 type StrategyParameters struct {
 	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds
 	NodeAffinityType                  []string
 	PodsHavingTooManyRestarts         *PodsHavingTooManyRestarts
-	MaxPodLifeTimeSeconds             *uint
+	PodLifeTime                       *PodLifeTime
 	RemoveDuplicates                  *RemoveDuplicates
+	Namespaces                        *Namespaces
+	ThresholdPriority                 *int32
+	ThresholdPriorityClassName        string
 }
 
 type Percentage float64
@@ -70,3 +91,8 @@ type PodsHavingTooManyRestarts struct {
 type RemoveDuplicates struct {
 	ExcludeOwnerKinds []string
 }
+
+type PodLifeTime struct {
+	MaxPodLifeTimeSeconds *uint
+	PodStatusPhases       []string
+}
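With the new fields in place, a policy can carry top-level eviction knobs plus per-strategy Namespaces, PodLifeTime, and priority thresholds. A minimal sketch of constructing such an internal policy in Go follows; it assumes the internal types live at sigs.k8s.io/descheduler/pkg/api, and all values are illustrative rather than recommended defaults.

// Illustrative only: builds an internal DeschedulerPolicy using the fields
// added in the hunk above. The import path and values are assumptions.
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
)

func main() {
	nodeSelector := "node-role.kubernetes.io/worker="
	evictLocalStorage := false
	maxEvict := 10
	maxLifeTime := uint(86400)
	priority := int32(10000)

	policy := &api.DeschedulerPolicy{
		NodeSelector:              &nodeSelector,
		EvictLocalStoragePods:     &evictLocalStorage,
		MaxNoOfPodsToEvictPerNode: &maxEvict,
		Strategies: api.StrategyList{
			"PodLifeTime": api.DeschedulerStrategy{
				Enabled: true,
				Params: &api.StrategyParameters{
					PodLifeTime:       &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
					Namespaces:        &api.Namespaces{Exclude: []string{"kube-system"}},
					ThresholdPriority: &priority,
				},
			},
		},
	}
	fmt.Printf("%+v\n", policy)
}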
@@ -28,6 +28,15 @@ type DeschedulerPolicy struct {
 
 	// Strategies
 	Strategies StrategyList `json:"strategies,omitempty"`
+
+	// NodeSelector for a set of nodes to operate over
+	NodeSelector *string `json:"nodeSelector,omitempty"`
+
+	// EvictLocalStoragePods allows pods using local storage to be evicted.
+	EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
+
+	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
+	MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
 }
 
 type StrategyName string
@@ -41,16 +50,26 @@ type DeschedulerStrategy struct {
 	Weight int `json:"weight,omitempty"`
 
 	// Strategy parameters
-	Params StrategyParameters `json:"params,omitempty"`
+	Params *StrategyParameters `json:"params,omitempty"`
 }
 
-// Only one of its members may be specified
+// Namespaces carries a list of included/excluded namespaces
+// for which a given strategy is applicable.
+type Namespaces struct {
+	Include []string `json:"include"`
+	Exclude []string `json:"exclude"`
+}
+
+// Besides Namespaces ThresholdPriority and ThresholdPriorityClassName only one of its members may be specified
 type StrategyParameters struct {
 	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
 	NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
 	PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
-	MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
+	PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
 	RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
+	Namespaces *Namespaces `json:"namespaces"`
+	ThresholdPriority *int32 `json:"thresholdPriority"`
+	ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
 }
 
 type Percentage float64
@@ -70,3 +89,8 @@ type PodsHavingTooManyRestarts struct {
 type RemoveDuplicates struct {
 	ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
 }
+
+type PodLifeTime struct {
+	MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
+	PodStatusPhases []string `json:"podStatusPhases,omitempty"`
+}
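Since v1alpha1 is the serialized form, the json tags above determine what a policy file looks like on disk. The sketch below decodes an illustrative policy with sigs.k8s.io/yaml; the import path for the v1alpha1 package and the `enabled` field name are assumptions based on the rest of this diff, and the YAML itself is made up for the example.

// Sketch: unmarshals a v1alpha1 policy that uses the new params fields.
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
	"sigs.k8s.io/yaml"
)

const policyYAML = `
apiVersion: descheduler/v1alpha1
kind: DeschedulerPolicy
evictLocalStoragePods: true
maxNoOfPodsToEvictPerNode: 5
strategies:
  PodLifeTime:
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      namespaces:
        exclude:
        - kube-system
      thresholdPriorityClassName: system-cluster-critical
`

func main() {
	var policy v1alpha1.DeschedulerPolicy
	if err := yaml.Unmarshal([]byte(policyYAML), &policy); err != nil {
		panic(err)
	}
	fmt.Printf("strategies: %d, evictLocalStoragePods: %v\n",
		len(policy.Strategies), *policy.EvictLocalStoragePods)
}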
@@ -55,6 +55,16 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.Namespaces)(nil), (*Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_Namespaces_To_v1alpha1_Namespaces(a.(*api.Namespaces), b.(*Namespaces), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddGeneratedConversionFunc((*NodeResourceUtilizationThresholds)(nil), (*api.NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(a.(*NodeResourceUtilizationThresholds), b.(*api.NodeResourceUtilizationThresholds), scope)
 	}); err != nil {
@@ -65,6 +75,16 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddGeneratedConversionFunc((*PodLifeTime)(nil), (*api.PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(a.(*PodLifeTime), b.(*api.PodLifeTime), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.PodLifeTime)(nil), (*PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(a.(*api.PodLifeTime), b.(*PodLifeTime), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddGeneratedConversionFunc((*PodsHavingTooManyRestarts)(nil), (*api.PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(a.(*PodsHavingTooManyRestarts), b.(*api.PodsHavingTooManyRestarts), scope)
 	}); err != nil {
@@ -100,6 +120,9 @@ func RegisterConversions(s *runtime.Scheme) error {
 
 func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
 	out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
+	out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
+	out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
+	out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
 	return nil
 }
 
@@ -110,6 +133,9 @@ func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Descheduler
 
 func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
 	out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
+	out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
+	out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
+	out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
 	return nil
 }
 
@@ -121,9 +147,7 @@ func Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.Desched
 func autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
 	out.Enabled = in.Enabled
 	out.Weight = in.Weight
-	if err := Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(&in.Params, &out.Params, s); err != nil {
-		return err
-	}
+	out.Params = (*api.StrategyParameters)(unsafe.Pointer(in.Params))
 	return nil
 }
 
@@ -135,9 +159,7 @@ func Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *Desched
 func autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
 	out.Enabled = in.Enabled
 	out.Weight = in.Weight
-	if err := Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(&in.Params, &out.Params, s); err != nil {
-		return err
-	}
+	out.Params = (*StrategyParameters)(unsafe.Pointer(in.Params))
 	return nil
 }
 
@@ -146,6 +168,28 @@ func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.Des
 	return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
 }
 
+func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
+	out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
+	out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
+	return nil
+}
+
+// Convert_v1alpha1_Namespaces_To_api_Namespaces is an autogenerated conversion function.
+func Convert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
+	return autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in, out, s)
+}
+
+func autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
+	out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
+	out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
+	return nil
+}
+
+// Convert_api_Namespaces_To_v1alpha1_Namespaces is an autogenerated conversion function.
+func Convert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
+	return autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in, out, s)
+}
+
 func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
 	out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
 	out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
@@ -170,6 +214,28 @@ func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtili
 	return autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in, out, s)
 }
 
+func autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
+	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
+	out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
+	return nil
+}
+
+// Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime is an autogenerated conversion function.
+func Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
+	return autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in, out, s)
+}
+
+func autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
+	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
+	out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
+	return nil
+}
+
+// Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime is an autogenerated conversion function.
+func Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
+	return autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in, out, s)
+}
+
 func autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
 	out.PodRestartThreshold = in.PodRestartThreshold
 	out.IncludingInitContainers = in.IncludingInitContainers
@@ -216,8 +282,11 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
 	out.NodeResourceUtilizationThresholds = (*api.NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
 	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
 	out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
-	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
+	out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
 	out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
+	out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
+	out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
+	out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
 	return nil
 }
 
@@ -230,8 +299,11 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
 	out.NodeResourceUtilizationThresholds = (*NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
 	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
 	out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
-	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
+	out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
 	out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
+	out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
+	out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
+	out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
 	return nil
 }
 
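Because the v1alpha1 and internal Namespaces types have identical memory layouts, the generated converters are plain unsafe.Pointer casts and can be called directly. A minimal sketch is shown below; the import paths are assumptions, and a nil conversion.Scope is enough because the generated body never touches it.

// Sketch: round-trips a Namespaces value through the generated converter above.
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
	in := &v1alpha1.Namespaces{
		Include: []string{"default"},
		Exclude: []string{"kube-system"},
	}

	out := &api.Namespaces{}
	// The generated function only re-points the slices, so no scope is needed here.
	if err := v1alpha1.Convert_v1alpha1_Namespaces_To_api_Namespaces(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Printf("internal: include=%v exclude=%v\n", out.Include, out.Exclude)
}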
@@ -35,6 +35,21 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 			(*out)[key] = *val.DeepCopy()
 		}
 	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = new(string)
+		**out = **in
+	}
+	if in.EvictLocalStoragePods != nil {
+		in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MaxNoOfPodsToEvictPerNode != nil {
+		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
+		*out = new(int)
+		**out = **in
+	}
 	return
 }
 
@@ -59,7 +74,11 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
 	*out = *in
-	in.Params.DeepCopyInto(&out.Params)
+	if in.Params != nil {
+		in, out := &in.Params, &out.Params
+		*out = new(StrategyParameters)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
@@ -73,6 +92,32 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Namespaces) DeepCopyInto(out *Namespaces) {
+	*out = *in
+	if in.Include != nil {
+		in, out := &in.Include, &out.Include
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Exclude != nil {
+		in, out := &in.Exclude, &out.Exclude
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
+func (in *Namespaces) DeepCopy() *Namespaces {
+	if in == nil {
+		return nil
+	}
+	out := new(Namespaces)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
 	*out = *in
@@ -103,6 +148,32 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
+	*out = *in
+	if in.MaxPodLifeTimeSeconds != nil {
+		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
+		*out = new(uint)
+		**out = **in
+	}
+	if in.PodStatusPhases != nil {
+		in, out := &in.PodStatusPhases, &out.PodStatusPhases
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
+func (in *PodLifeTime) DeepCopy() *PodLifeTime {
+	if in == nil {
+		return nil
+	}
+	out := new(PodLifeTime)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
 	*out = *in
@@ -202,16 +273,26 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
 		*out = new(PodsHavingTooManyRestarts)
 		**out = **in
 	}
-	if in.MaxPodLifeTimeSeconds != nil {
-		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
-		*out = new(uint)
-		**out = **in
+	if in.PodLifeTime != nil {
+		in, out := &in.PodLifeTime, &out.PodLifeTime
+		*out = new(PodLifeTime)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.RemoveDuplicates != nil {
 		in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
 		*out = new(RemoveDuplicates)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = new(Namespaces)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ThresholdPriority != nil {
+		in, out := &in.ThresholdPriority, &out.ThresholdPriority
+		*out = new(int32)
+		**out = **in
+	}
 	return
 }
 
|
|||||||
(*out)[key] = *val.DeepCopy()
|
(*out)[key] = *val.DeepCopy()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if in.NodeSelector != nil {
|
||||||
|
in, out := &in.NodeSelector, &out.NodeSelector
|
||||||
|
*out = new(string)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.EvictLocalStoragePods != nil {
|
||||||
|
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
|
||||||
|
*out = new(bool)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.MaxNoOfPodsToEvictPerNode != nil {
|
||||||
|
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -59,7 +74,11 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
|
|||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
|
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
|
||||||
*out = *in
|
*out = *in
|
||||||
in.Params.DeepCopyInto(&out.Params)
|
if in.Params != nil {
|
||||||
|
in, out := &in.Params, &out.Params
|
||||||
|
*out = new(StrategyParameters)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -73,6 +92,32 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
|
||||||
|
*out = *in
|
||||||
|
if in.Include != nil {
|
||||||
|
in, out := &in.Include, &out.Include
|
||||||
|
*out = make([]string, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
if in.Exclude != nil {
|
||||||
|
in, out := &in.Exclude, &out.Exclude
|
||||||
|
*out = make([]string, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
|
||||||
|
func (in *Namespaces) DeepCopy() *Namespaces {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(Namespaces)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
|
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@@ -103,6 +148,32 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
|
||||||
|
*out = *in
|
||||||
|
if in.MaxPodLifeTimeSeconds != nil {
|
||||||
|
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
|
||||||
|
*out = new(uint)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.PodStatusPhases != nil {
|
||||||
|
in, out := &in.PodStatusPhases, &out.PodStatusPhases
|
||||||
|
*out = make([]string, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
|
||||||
|
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(PodLifeTime)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
|
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@@ -202,16 +273,26 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
|||||||
*out = new(PodsHavingTooManyRestarts)
|
*out = new(PodsHavingTooManyRestarts)
|
||||||
**out = **in
|
**out = **in
|
||||||
}
|
}
|
||||||
if in.MaxPodLifeTimeSeconds != nil {
|
if in.PodLifeTime != nil {
|
||||||
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
|
in, out := &in.PodLifeTime, &out.PodLifeTime
|
||||||
*out = new(uint)
|
*out = new(PodLifeTime)
|
||||||
**out = **in
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
if in.RemoveDuplicates != nil {
|
if in.RemoveDuplicates != nil {
|
||||||
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
|
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
|
||||||
*out = new(RemoveDuplicates)
|
*out = new(RemoveDuplicates)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
|
if in.Namespaces != nil {
|
||||||
|
in, out := &in.Namespaces, &out.Namespaces
|
||||||
|
*out = new(Namespaces)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.ThresholdPriority != nil {
|
||||||
|
in, out := &in.ThresholdPriority, &out.ThresholdPriority
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
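The hunks above are generated DeepCopy helpers for the new policy fields (PodLifeTime, Namespaces, ThresholdPriority, and the policy-level NodeSelector/EvictLocalStoragePods/MaxNoOfPodsToEvictPerNode pointers). A minimal sketch of what callers get from them, assuming the v1alpha1 API package path; this is not part of the changeset:

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1" // assumed package path for the types above
)

func main() {
	maxLifeTime := uint(3600)
	threshold := int32(1000)
	params := &v1alpha1.StrategyParameters{
		PodLifeTime:       &v1alpha1.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
		Namespaces:        &v1alpha1.Namespaces{Include: []string{"dev"}},
		ThresholdPriority: &threshold,
	}

	// DeepCopy duplicates every pointer and slice field, so mutating the
	// copy leaves the original StrategyParameters untouched.
	cp := params.DeepCopy()
	*cp.PodLifeTime.MaxPodLifeTimeSeconds = 60

	fmt.Println(*params.PodLifeTime.MaxPodLifeTimeSeconds) // 3600
	fmt.Println(*cp.PodLifeTime.MaxPodLifeTimeSeconds)     // 60
}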
@@ -19,8 +19,6 @@ package componentconfig
 import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-
-	"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
 )
 
 var (
@@ -34,12 +32,6 @@ const GroupName = "deschedulercomponentconfig"
 // SchemeGroupVersion is group version used to register these objects
 var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
 
-func init() {
-	if err := addKnownTypes(scheme.Scheme); err != nil {
-		panic(err)
-	}
-}
-
 // Kind takes an unqualified kind and returns a Group qualified GroupKind
 func Kind(kind string) schema.GroupKind {
 	return SchemeGroupVersion.WithKind(kind).GroupKind()
@@ -20,6 +20,7 @@ import (
 	"time"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	componentbaseconfig "k8s.io/component-base/config"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -48,4 +49,8 @@ type DeschedulerConfiguration struct {
 
 	// EvictLocalStoragePods allows pods using local storage to be evicted.
 	EvictLocalStoragePods bool
+
+	// Logging specifies the options of logging.
+	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
+	Logging componentbaseconfig.LoggingConfiguration
 }
@@ -20,9 +20,9 @@ import (
 	"context"
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
@@ -60,7 +60,7 @@ func Run(rs *options.DeschedulerServer) error {
 	return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
 }
 
-type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)
+type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor)
 
 func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
 	sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
@@ -70,25 +70,41 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 	sharedInformerFactory.WaitForCacheSync(stopChannel)
 
 	strategyFuncs := map[string]strategyFunction{
 		"RemoveDuplicates":                        strategies.RemoveDuplicatePods,
 		"LowNodeUtilization":                      strategies.LowNodeUtilization,
 		"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
 		"RemovePodsViolatingNodeAffinity":         strategies.RemovePodsViolatingNodeAffinity,
 		"RemovePodsViolatingNodeTaints":           strategies.RemovePodsViolatingNodeTaints,
 		"RemovePodsHavingTooManyRestarts":         strategies.RemovePodsHavingTooManyRestarts,
 		"PodLifeTime":                             strategies.PodLifeTime,
+		"RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint,
+	}
+
+	nodeSelector := rs.NodeSelector
+	if deschedulerPolicy.NodeSelector != nil {
+		nodeSelector = *deschedulerPolicy.NodeSelector
+	}
+
+	evictLocalStoragePods := rs.EvictLocalStoragePods
+	if deschedulerPolicy.EvictLocalStoragePods != nil {
+		evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
+	}
+
+	maxNoOfPodsToEvictPerNode := rs.MaxNoOfPodsToEvictPerNode
+	if deschedulerPolicy.MaxNoOfPodsToEvictPerNode != nil {
+		maxNoOfPodsToEvictPerNode = *deschedulerPolicy.MaxNoOfPodsToEvictPerNode
 	}
 
 	wait.Until(func() {
-		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
+		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector, stopChannel)
 		if err != nil {
-			klog.V(1).Infof("Unable to get ready nodes: %v", err)
+			klog.V(1).InfoS("Unable to get ready nodes", "err", err)
 			close(stopChannel)
 			return
 		}
 
 		if len(nodes) <= 1 {
-			klog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
+			klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
 			close(stopChannel)
 			return
 		}
@@ -97,13 +113,14 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 			rs.Client,
 			evictionPolicyGroupVersion,
 			rs.DryRun,
-			rs.MaxNoOfPodsToEvictPerNode,
+			maxNoOfPodsToEvictPerNode,
 			nodes,
+			evictLocalStoragePods,
 		)
 
 		for name, f := range strategyFuncs {
 			if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
-				f(ctx, rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
+				f(ctx, rs.Client, strategy, nodes, podEvictor)
 			}
 		}
 
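RunDeschedulerStrategies now lets DeschedulerPolicy fields override the server flags whenever they are set (non-nil). A small self-contained sketch of that precedence rule; resolveNodeSelector is a hypothetical helper written for illustration, not descheduler code:

package main

import "fmt"

// resolveNodeSelector mirrors the pattern above: a value carried by the
// DeschedulerPolicy (a pointer, nil when unset) takes precedence over the
// server-level flag default.
func resolveNodeSelector(serverDefault string, policyValue *string) string {
	if policyValue != nil {
		return *policyValue
	}
	return serverDefault
}

func main() {
	fromPolicy := "role=worker"
	fmt.Println(resolveNodeSelector("zone=a", nil))         // flag default wins: zone=a
	fmt.Println(resolveNodeSelector("zone=a", &fromPolicy)) // policy wins: role=worker
}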
@@ -38,7 +38,10 @@ func TestTaintsUpdated(t *testing.T) {
 	stopChannel := make(chan struct{})
 	defer close(stopChannel)
 
-	rs := options.NewDeschedulerServer()
+	rs, err := options.NewDeschedulerServer()
+	if err != nil {
+		t.Fatalf("Unable to initialize server: %v", err)
+	}
 	rs.Client = client
 	rs.DeschedulingInterval = 100 * time.Millisecond
 	go func() {
@@ -19,37 +19,47 @@ package evictions
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1beta1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/record"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/utils"
 
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 )
 
+const (
+	evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
+)
+
 // nodePodEvictedCount keeps count of pods evicted on node
 type nodePodEvictedCount map[*v1.Node]int
 
 type PodEvictor struct {
 	client                clientset.Interface
 	policyGroupVersion    string
 	dryRun                bool
-	maxPodsToEvict        int
+	maxPodsToEvictPerNode int
 	nodepodCount          nodePodEvictedCount
+	evictLocalStoragePods bool
 }
 
 func NewPodEvictor(
 	client clientset.Interface,
 	policyGroupVersion string,
 	dryRun bool,
-	maxPodsToEvict int,
+	maxPodsToEvictPerNode int,
 	nodes []*v1.Node,
+	evictLocalStoragePods bool,
 ) *PodEvictor {
 	var nodePodCount = make(nodePodEvictedCount)
 	for _, node := range nodes {
@@ -58,11 +68,12 @@ func NewPodEvictor(
 	}
 
 	return &PodEvictor{
 		client:                client,
 		policyGroupVersion:    policyGroupVersion,
 		dryRun:                dryRun,
-		maxPodsToEvict:        maxPodsToEvict,
+		maxPodsToEvictPerNode: maxPodsToEvictPerNode,
 		nodepodCount:          nodePodCount,
+		evictLocalStoragePods: evictLocalStoragePods,
 	}
 }
 
@@ -81,25 +92,34 @@ func (pe *PodEvictor) TotalEvicted() int {
 }
 
 // EvictPod returns non-nil error only when evicting a pod on a node is not
-// possible (due to maxPodsToEvict constraint). Success is true when the pod
+// possible (due to maxPodsToEvictPerNode constraint). Success is true when the pod
 // is evicted on the server side.
-func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node) (bool, error) {
-	if pe.maxPodsToEvict > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvict {
-		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvict, node.Name)
+func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, reasons ...string) (bool, error) {
+	var reason string
+	if len(reasons) > 0 {
+		reason = " (" + strings.Join(reasons, ", ") + ")"
+	}
+	if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode {
+		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name)
 	}
 
 	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
 	if err != nil {
 		// err is used only for logging purposes
-		klog.Errorf("Error evicting pod: %#v in namespace %#v (%#v)", pod.Name, pod.Namespace, err)
+		klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
 		return false, nil
 	}
 
 	pe.nodepodCount[node]++
 	if pe.dryRun {
-		klog.V(1).Infof("Evicted pod in dry run mode: %#v in namespace %#v", pod.Name, pod.Namespace)
+		klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason)
 	} else {
-		klog.V(1).Infof("Evicted pod: %#v in namespace %#v", pod.Name, pod.Namespace)
+		klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason)
+		eventBroadcaster := record.NewBroadcaster()
+		eventBroadcaster.StartStructuredLogging(3)
+		eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
+		r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
+		r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
 	}
 	return true, nil
 }
@@ -123,14 +143,6 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
 	}
 	err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction)
 
-	if err == nil {
-		eventBroadcaster := record.NewBroadcaster()
-		eventBroadcaster.StartLogging(klog.V(3).Infof)
-		eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: client.CoreV1().Events(pod.Namespace)})
-		r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
-		r.Event(pod, v1.EventTypeNormal, "Descheduled", "pod evicted by sigs.k8s.io/descheduler")
-		return nil
-	}
 	if apierrors.IsTooManyRequests(err) {
 		return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
 	}
@@ -139,3 +151,123 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
 	}
 	return err
 }
+
+type Options struct {
+	priority *int32
+}
+
+// WithPriorityThreshold sets a threshold for pod's priority class.
+// Any pod whose priority class is lower is evictable.
+func WithPriorityThreshold(priority int32) func(opts *Options) {
+	return func(opts *Options) {
+		var p int32 = priority
+		opts.priority = &p
+	}
+}
+
+type constraint func(pod *v1.Pod) error
+
+type evictable struct {
+	constraints []constraint
+}
+
+// Evictable provides an implementation of IsEvictable(IsEvictable(pod *v1.Pod) bool).
+// The method accepts a list of options which allow to extend constraints
+// which decides when a pod is considered evictable.
+func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
+	options := &Options{}
+	for _, opt := range opts {
+		opt(options)
+	}
+
+	ev := &evictable{}
+	if !pe.evictLocalStoragePods {
+		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
+			if IsPodWithLocalStorage(pod) {
+				return fmt.Errorf("pod has local storage and descheduler is not configured with --evict-local-storage-pods")
+			}
+			return nil
+		})
+	}
+	if options.priority != nil {
+		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
+			if IsPodEvictableBasedOnPriority(pod, *options.priority) {
+				return nil
+			}
+			return fmt.Errorf("pod has higher priority than specified priority class threshold")
+		})
+	}
+	return ev
+}
+
+// IsEvictable decides when a pod is evictable
+func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
+	checkErrs := []error{}
+	if IsCriticalPod(pod) {
+		checkErrs = append(checkErrs, fmt.Errorf("pod is critical"))
+	}
+
+	ownerRefList := podutil.OwnerRef(pod)
+	if IsDaemonsetPod(ownerRefList) {
+		checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
+	}
+
+	if len(ownerRefList) == 0 {
+		checkErrs = append(checkErrs, fmt.Errorf("pod does not have any ownerrefs"))
+	}
+
+	if IsMirrorPod(pod) {
+		checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
+	}
+
+	for _, c := range ev.constraints {
+		if err := c(pod); err != nil {
+			checkErrs = append(checkErrs, err)
+		}
+	}
+
+	if len(checkErrs) > 0 && !HaveEvictAnnotation(pod) {
+		klog.V(4).InfoS("Pod lacks an eviction annotation and fails the following checks", "pod", klog.KObj(pod), "checks", errors.NewAggregate(checkErrs).Error())
+		return false
+	}
+	return true
+}
+
+func IsCriticalPod(pod *v1.Pod) bool {
+	return utils.IsCriticalPod(pod)
+}
+
+func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
+	for _, ownerRef := range ownerRefList {
+		if ownerRef.Kind == "DaemonSet" {
+			return true
+		}
+	}
+	return false
+}
+
+// IsMirrorPod checks whether the pod is a mirror pod.
+func IsMirrorPod(pod *v1.Pod) bool {
+	return utils.IsMirrorPod(pod)
+}
+
+// HaveEvictAnnotation checks if the pod have evict annotation
+func HaveEvictAnnotation(pod *v1.Pod) bool {
+	_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
+	return found
+}
+
+func IsPodWithLocalStorage(pod *v1.Pod) bool {
+	for _, volume := range pod.Spec.Volumes {
+		if volume.HostPath != nil || volume.EmptyDir != nil {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsPodEvictableBasedOnPriority checks if the given pod is evictable based on priority resolved from pod Spec.
+func IsPodEvictableBasedOnPriority(pod *v1.Pod, priority int32) bool {
+	return pod.Spec.Priority == nil || *pod.Spec.Priority < priority
+}
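A hedged usage sketch of the reworked PodEvictor above: the per-node eviction limit and the local-storage switch now live on the evictor, evictability is checked through Evictable() plus options, and EvictPod takes optional reasons. The import path and the fake client wiring are assumptions made for illustration only:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions" // assumed import path for the package in this diff
)

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "p1",
		Namespace: "default",
		// The evict annotation short-circuits the checks in IsEvictable above.
		Annotations: map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"},
	}}
	client := fake.NewSimpleClientset(pod)

	// New constructor signature: maxPodsToEvictPerNode and evictLocalStoragePods
	// are carried by the evictor itself rather than passed to every strategy.
	podEvictor := evictions.NewPodEvictor(client, "policy/v1beta1", false, 5, []*v1.Node{node}, false)

	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(1000))
	if evictable.IsEvictable(pod) {
		// EvictPod accepts optional human-readable reasons that show up in logs and events.
		evicted, err := podEvictor.EvictPod(context.TODO(), pod, node, "example reason")
		fmt.Println(evicted, err)
	}
}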
@@ -21,9 +21,12 @@ import (
 	"testing"
 
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/test"
 )
 
@@ -65,3 +68,237 @@ func TestEvictPod(t *testing.T) {
 		}
 	}
 }
+
+func TestIsEvictable(t *testing.T) {
+	n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
+	lowPriority := int32(800)
+	highPriority := int32(900)
+	type testCase struct {
+		pod                   *v1.Pod
+		runBefore             func(*v1.Pod)
+		evictLocalStoragePods bool
+		priorityThreshold     *int32
+		result                bool
+	}
+
+	testCases := []testCase{
+		{
+			pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+			},
+			evictLocalStoragePods: false,
+			result:                true,
+		}, {
+			pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+			},
+			evictLocalStoragePods: false,
+			result:                true,
+		}, {
+			pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
+			},
+			evictLocalStoragePods: false,
+			result:                true,
+		}, {
+			pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
+				pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
+			},
+			evictLocalStoragePods: false,
+			result:                true,
+		}, {
+			pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+				pod.Spec.Volumes = []v1.Volume{
+					{
+						Name: "sample",
+						VolumeSource: v1.VolumeSource{
+							HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
+							EmptyDir: &v1.EmptyDirVolumeSource{
+								SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+						},
+					},
+				}
+			},
+			evictLocalStoragePods: false,
+			result:                false,
+		}, {
+			pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+				pod.Spec.Volumes = []v1.Volume{
+					{
+						Name: "sample",
+						VolumeSource: v1.VolumeSource{
+							HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
+							EmptyDir: &v1.EmptyDirVolumeSource{
+								SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+						},
+					},
+				}
+			},
+			evictLocalStoragePods: true,
+			result:                true,
+		}, {
+			pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+				pod.Spec.Volumes = []v1.Volume{
+					{
+						Name: "sample",
+						VolumeSource: v1.VolumeSource{
+							HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
+							EmptyDir: &v1.EmptyDirVolumeSource{
+								SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+						},
+					},
+				}
+			},
+			evictLocalStoragePods: false,
+			result:                true,
+		}, {
+			pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
+			},
+			evictLocalStoragePods: false,
+			result:                false,
+		}, {
+			pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
+				pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
+			},
+			evictLocalStoragePods: false,
+			result:                true,
+		}, {
+			pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+				pod.Annotations = test.GetMirrorPodAnnotation()
+			},
+			evictLocalStoragePods: false,
+			result:                false,
+		}, {
+			pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+				pod.Annotations = test.GetMirrorPodAnnotation()
+				pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
+			},
+			evictLocalStoragePods: false,
+			result:                true,
+		}, {
+			pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+				priority := utils.SystemCriticalPriority
+				pod.Spec.Priority = &priority
+			},
+			evictLocalStoragePods: false,
+			result:                false,
+		}, {
+			pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+				priority := utils.SystemCriticalPriority
+				pod.Spec.Priority = &priority
+				pod.Annotations = map[string]string{
+					"descheduler.alpha.kubernetes.io/evict": "true",
+				}
+			},
+			evictLocalStoragePods: false,
+			result:                true,
+		}, {
+			pod: test.BuildTestPod("p14", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+				pod.Spec.Priority = &highPriority
+			},
+			evictLocalStoragePods: false,
+			priorityThreshold:     &lowPriority,
+			result:                false,
+		}, {
+			pod: test.BuildTestPod("p15", 400, 0, n1.Name, nil),
+			runBefore: func(pod *v1.Pod) {
+				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
+				pod.Spec.Priority = &highPriority
+			},
+			evictLocalStoragePods: false,
+			priorityThreshold:     &lowPriority,
+			result:                true,
+		},
+	}
+
+	for _, test := range testCases {
+		test.runBefore(test.pod)
+
+		podEvictor := &PodEvictor{
+			evictLocalStoragePods: test.evictLocalStoragePods,
+		}
+
+		evictable := podEvictor.Evictable()
+		if test.priorityThreshold != nil {
+			evictable = podEvictor.Evictable(WithPriorityThreshold(*test.priorityThreshold))
+		}
+
+		result := evictable.IsEvictable(test.pod)
+		if result != test.result {
+			t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pod.Name, test.result, result)
+		}
+	}
+}
+
+func TestPodTypes(t *testing.T) {
+	n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
+	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
+
+	// These won't be evicted.
+	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, nil)
+	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, nil)
+	p4 := test.BuildTestPod("p4", 400, 0, n1.Name, nil)
+
+	p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
+	// The following 4 pods won't get evicted.
+	// A daemonset.
+	//p2.Annotations = test.GetDaemonSetAnnotation()
+	p2.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
+	// A pod with local storage.
+	p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+	p3.Spec.Volumes = []v1.Volume{
+		{
+			Name: "sample",
+			VolumeSource: v1.VolumeSource{
+				HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
+				EmptyDir: &v1.EmptyDirVolumeSource{
+					SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+			},
+		},
+	}
+	// A Mirror Pod.
+	p4.Annotations = test.GetMirrorPodAnnotation()
+	if !IsMirrorPod(p4) {
+		t.Errorf("Expected p4 to be a mirror pod.")
+	}
+	if !IsPodWithLocalStorage(p3) {
+		t.Errorf("Expected p3 to be a pod with local storage.")
+	}
+	ownerRefList := podutil.OwnerRef(p2)
+	if !IsDaemonsetPod(ownerRefList) {
+		t.Errorf("Expected p2 to be a daemonset pod.")
+	}
+	ownerRefList = podutil.OwnerRef(p1)
+	if IsDaemonsetPod(ownerRefList) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
+		t.Errorf("Expected p1 to be a normal pod.")
+	}
+}
@@ -18,12 +18,13 @@ package node
 
 import (
 	"context"
-	"k8s.io/api/core/v1"
+
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
@@ -42,7 +43,7 @@ func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer co
 	}
 
 	if len(nodes) == 0 {
-		klog.V(2).Infof("node lister returned empty list, now fetch directly")
+		klog.V(2).InfoS("Node lister returned empty list, now fetch directly")
 
 		nItems, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: nodeSelector})
 		if err != nil {
@@ -77,19 +78,19 @@ func IsReady(node *v1.Node) bool {
 		// - NodeOutOfDisk condition status is ConditionFalse,
 		// - NodeNetworkUnavailable condition status is ConditionFalse.
 		if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
-			klog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+			klog.V(1).InfoS("Ignoring node", "node", klog.KObj(node), "condition", cond.Type, "status", cond.Status)
 			return false
 		} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
-			klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+			klog.V(4).InfoS("Ignoring node with condition status", "node", klog.KObj(node.Name), "condition", cond.Type, "status", cond.Status)
 			return false
 		} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
-			klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+			klog.V(4).InfoS("Ignoring node with condition status", "node", klog.KObj(node.Name), "condition", cond.Type, "status", cond.Status)
 			return false
 		}*/
 	}
 	// Ignore nodes that are marked unschedulable
 	/*if node.Spec.Unschedulable {
-		klog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)
+		klog.V(4).InfoS("Ignoring node since it is unschedulable", "node", klog.KObj(node.Name))
 		return false
 	}*/
 	return true
@@ -112,7 +113,7 @@ func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
 			continue
 		}
 		if !IsNodeUnschedulable(node) {
-			klog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
+			klog.V(2).InfoS("Pod can possibly be scheduled on a different node", "pod", klog.KObj(pod), "node", klog.KObj(node))
 			return true
 		}
 	}
@@ -125,15 +126,15 @@ func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
 	ok, err := utils.PodMatchNodeSelector(pod, node)
 
 	if err != nil {
-		klog.Error(err)
+		klog.ErrorS(err, "Failed to match node selector")
 		return false
 	}
 
 	if !ok {
-		klog.V(1).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
+		klog.V(2).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
 		return false
 	}
 
-	klog.V(3).Infof("Pod %v fits on node %v", pod.Name, node.Name)
+	klog.V(2).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
 	return true
 }
@@ -20,7 +20,7 @@ import (
 	"context"
 	"testing"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
@@ -69,7 +69,7 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
 	sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
 	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
 
-	stopChannel := make(chan struct{}, 0)
+	stopChannel := make(chan struct{})
 	sharedInformerFactory.Start(stopChannel)
 	sharedInformerFactory.WaitForCacheSync(stopChannel)
 	defer close(stopChannel)
@@ -18,6 +18,8 @@ package pod
 
 import (
 	"context"
+	"sort"
+
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
@@ -25,57 +27,112 @@ import (
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
-const (
-	evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
-)
-
-// IsEvictable checks if a pod is evictable or not.
-func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
-	ownerRefList := OwnerRef(pod)
-	if !HaveEvictAnnotation(pod) && (IsMirrorPod(pod) || (!evictLocalStoragePods && IsPodWithLocalStorage(pod)) || len(ownerRefList) == 0 || IsDaemonsetPod(ownerRefList) || IsCriticalPod(pod)) {
-		return false
-	}
-	return true
-}
-
-// ListEvictablePodsOnNode returns the list of evictable pods on node.
-func ListEvictablePodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
-	pods, err := ListPodsOnANode(ctx, client, node)
-	if err != nil {
-		return []*v1.Pod{}, err
-	}
-	evictablePods := make([]*v1.Pod, 0)
-	for _, pod := range pods {
-		if !IsEvictable(pod, evictLocalStoragePods) {
-			continue
-		} else {
-			evictablePods = append(evictablePods, pod)
-		}
-	}
-	return evictablePods, nil
-}
-
-func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
-	fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
-	if err != nil {
-		return []*v1.Pod{}, err
-	}
-
-	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
-		metav1.ListOptions{FieldSelector: fieldSelector.String()})
-	if err != nil {
-		return []*v1.Pod{}, err
-	}
-
-	pods := make([]*v1.Pod, 0)
-	for i := range podList.Items {
-		pods = append(pods, &podList.Items[i])
-	}
-	return pods, nil
-}
-
-func IsCriticalPod(pod *v1.Pod) bool {
-	return utils.IsCriticalPod(pod)
+type Options struct {
+	filter             func(pod *v1.Pod) bool
+	includedNamespaces []string
+	excludedNamespaces []string
+}
+
+// WithFilter sets a pod filter.
+// The filter function should return true if the pod should be returned from ListPodsOnANode
+func WithFilter(filter func(pod *v1.Pod) bool) func(opts *Options) {
+	return func(opts *Options) {
+		opts.filter = filter
+	}
+}
+
+// WithNamespaces sets included namespaces
+func WithNamespaces(namespaces []string) func(opts *Options) {
+	return func(opts *Options) {
+		opts.includedNamespaces = namespaces
+	}
+}
+
+// WithoutNamespaces sets excluded namespaces
+func WithoutNamespaces(namespaces []string) func(opts *Options) {
+	return func(opts *Options) {
+		opts.excludedNamespaces = namespaces
+	}
+}
+
+// ListPodsOnANode lists all of the pods on a node
+// It also accepts an optional "filter" function which can be used to further limit the pods that are returned.
+// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
+// be used by strategies to extend it if there are further restrictions, such as with NodeAffinity).
+func ListPodsOnANode(
+	ctx context.Context,
+	client clientset.Interface,
+	node *v1.Node,
+	opts ...func(opts *Options),
+) ([]*v1.Pod, error) {
+	options := &Options{}
+	for _, opt := range opts {
+		opt(options)
+	}
+
+	pods := make([]*v1.Pod, 0)
+
+	fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)
+
+	if len(options.includedNamespaces) > 0 {
+		fieldSelector, err := fields.ParseSelector(fieldSelectorString)
+		if err != nil {
+			return []*v1.Pod{}, err
+		}
+
+		for _, namespace := range options.includedNamespaces {
+			podList, err := client.CoreV1().Pods(namespace).List(ctx,
+				metav1.ListOptions{FieldSelector: fieldSelector.String()})
+			if err != nil {
+				return []*v1.Pod{}, err
+			}
+			for i := range podList.Items {
+				if options.filter != nil && !options.filter(&podList.Items[i]) {
+					continue
+				}
+				pods = append(pods, &podList.Items[i])
+			}
+		}
+		return pods, nil
+	}
+
+	if len(options.excludedNamespaces) > 0 {
+		for _, namespace := range options.excludedNamespaces {
+			fieldSelectorString += ",metadata.namespace!=" + namespace
+		}
+	}
+
+	fieldSelector, err := fields.ParseSelector(fieldSelectorString)
+	if err != nil {
+		return []*v1.Pod{}, err
+	}
+
+	// INFO(jchaloup): field selectors do not work properly with listers
+	// Once the descheduler switches to pod listers (through informers),
+	// We need to flip to client-side filtering.
+	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
+		metav1.ListOptions{FieldSelector: fieldSelector.String()})
+	if err != nil {
+		return []*v1.Pod{}, err
+	}
+
+	for i := range podList.Items {
+		// fake client does not support field selectors
+		// so let's filter based on the node name as well (quite cheap)
+		if podList.Items[i].Spec.NodeName != node.Name {
+			continue
+		}
+		if options.filter != nil && !options.filter(&podList.Items[i]) {
+			continue
+		}
+		pods = append(pods, &podList.Items[i])
+	}
+	return pods, nil
+}
+
+// OwnerRef returns the ownerRefList for the pod.
+func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
+	return pod.ObjectMeta.GetOwnerReferences()
 }
 
 func IsBestEffortPod(pod *v1.Pod) bool {
@@ -90,37 +147,26 @@ func IsGuaranteedPod(pod *v1.Pod) bool {
 	return utils.GetPodQOS(pod) == v1.PodQOSGuaranteed
 }
 
-func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
-	for _, ownerRef := range ownerRefList {
-		if ownerRef.Kind == "DaemonSet" {
-			return true
-		}
-	}
-	return false
-}
-
-// IsMirrorPod checks whether the pod is a mirror pod.
-func IsMirrorPod(pod *v1.Pod) bool {
-	return utils.IsMirrorPod(pod)
-}
-
-// HaveEvictAnnotation checks if the pod have evict annotation
-func HaveEvictAnnotation(pod *v1.Pod) bool {
-	_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
-	return found
-}
-
-func IsPodWithLocalStorage(pod *v1.Pod) bool {
-	for _, volume := range pod.Spec.Volumes {
-		if volume.HostPath != nil || volume.EmptyDir != nil {
-			return true
-		}
-	}
-	return false
-}
-
-// OwnerRef returns the ownerRefList for the pod.
-func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
-	return pod.ObjectMeta.GetOwnerReferences()
+// SortPodsBasedOnPriorityLowToHigh sorts pods based on their priorities from low to high.
+// If pods have same priorities, they will be sorted by QoS in the following order:
+// BestEffort, Burstable, Guaranteed
+func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
+	sort.Slice(pods, func(i, j int) bool {
+		if pods[i].Spec.Priority == nil && pods[j].Spec.Priority != nil {
+			return true
+		}
+		if pods[j].Spec.Priority == nil && pods[i].Spec.Priority != nil {
+			return false
+		}
+		if (pods[j].Spec.Priority == nil && pods[i].Spec.Priority == nil) || (*pods[i].Spec.Priority == *pods[j].Spec.Priority) {
+			if IsBestEffortPod(pods[i]) {
+				return true
+			}
+			if IsBurstablePod(pods[i]) && IsGuaranteedPod(pods[j]) {
+				return true
+			}
+			return false
+		}
+		return *pods[i].Spec.Priority < *pods[j].Spec.Priority
+	})
 }
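ListPodsOnANode now takes functional options instead of the removed IsEvictable/ListEvictablePodsOnNode pair. A sketch of calling it with a filter, assuming the podutil import path shown elsewhere in this changeset; in the strategies the filter typically comes from podEvictor.Evictable().IsEvictable:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" // assumed import path for the package in this diff
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"},
		Spec:       v1.PodSpec{NodeName: "n1"},
	}
	client := fake.NewSimpleClientset(pod)
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}

	// The filter plays the role the old per-strategy IsEvictable check used to play:
	// only pods for which it returns true are included in the result.
	pods, err := podutil.ListPodsOnANode(context.TODO(), client, node,
		podutil.WithFilter(func(p *v1.Pod) bool { return p.Namespace != "kube-system" }),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(pods))
}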
@@ -17,221 +17,99 @@ limitations under the License.
|
|||||||
package pod
|
package pod
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"sigs.k8s.io/descheduler/pkg/utils"
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
|
core "k8s.io/client-go/testing"
|
||||||
"sigs.k8s.io/descheduler/test"
|
"sigs.k8s.io/descheduler/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestIsEvictable(t *testing.T) {
|
var (
|
||||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
lowPriority = int32(0)
|
||||||
type testCase struct {
|
highPriority = int32(10000)
|
||||||
pod *v1.Pod
|
)
|
||||||
runBefore func(*v1.Pod)
|
|
||||||
evictLocalStoragePods bool
|
|
||||||
result bool
|
|
||||||
}
|
|
||||||
|
|
||||||
testCases := []testCase{
|
func TestListPodsOnANode(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
pods map[string][]v1.Pod
|
||||||
|
node *v1.Node
|
||||||
|
expectedPodCount int
|
||||||
|
}{
|
||||||
{
|
{
|
||||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
|
name: "test listing pods on a node",
|
||||||
runBefore: func(pod *v1.Pod) {
|
pods: map[string][]v1.Pod{
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
"n1": {
|
||||||
|
*test.BuildTestPod("pod1", 100, 0, "n1", nil),
|
||||||
|
*test.BuildTestPod("pod2", 100, 0, "n1", nil),
|
||||||
|
},
|
||||||
|
"n2": {*test.BuildTestPod("pod3", 100, 0, "n2", nil)},
|
||||||
},
|
},
|
||||||
evictLocalStoragePods: false,
|
node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||||
result: true,
|
expectedPodCount: 2,
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
			result:                true,
		}, {
			pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
			runBefore: func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.Volumes = []v1.Volume{
					{
						Name: "sample",
						VolumeSource: v1.VolumeSource{
							HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
							EmptyDir: &v1.EmptyDirVolumeSource{
								SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
						},
					},
				}
			},
			evictLocalStoragePods: false,
			result:                false,
		}, {
			pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
			runBefore: func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.Volumes = []v1.Volume{
					{
						Name: "sample",
						VolumeSource: v1.VolumeSource{
							HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
							EmptyDir: &v1.EmptyDirVolumeSource{
								SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
						},
					},
				}
			},
			evictLocalStoragePods: true,
			result:                true,
		}, {
			pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
			runBefore: func(pod *v1.Pod) {
				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.Volumes = []v1.Volume{
					{
						Name: "sample",
						VolumeSource: v1.VolumeSource{
							HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
							EmptyDir: &v1.EmptyDirVolumeSource{
								SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
						},
					},
				}
			},
			evictLocalStoragePods: false,
			result:                true,
		}, {
			pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
			runBefore: func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
			},
			evictLocalStoragePods: false,
			result:                false,
		}, {
			pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
			runBefore: func(pod *v1.Pod) {
				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
				pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
			},
			evictLocalStoragePods: false,
			result:                true,
		}, {
			pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
			runBefore: func(pod *v1.Pod) {
				pod.Annotations = test.GetMirrorPodAnnotation()
			},
			evictLocalStoragePods: false,
			result:                false,
		}, {
			pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
			runBefore: func(pod *v1.Pod) {
				pod.Annotations = test.GetMirrorPodAnnotation()
				pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
			},
			evictLocalStoragePods: false,
			result:                true,
		}, {
			pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
			runBefore: func(pod *v1.Pod) {
				priority := utils.SystemCriticalPriority
				pod.Spec.Priority = &priority
			},
			evictLocalStoragePods: false,
			result:                false,
		}, {
			pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
			runBefore: func(pod *v1.Pod) {
				priority := utils.SystemCriticalPriority
				pod.Spec.Priority = &priority
				pod.Annotations = map[string]string{
					"descheduler.alpha.kubernetes.io/evict": "true",
				}
			},
			evictLocalStoragePods: false,
			result:                true,
		},
	}
	for _, test := range testCases {
		test.runBefore(test.pod)
		result := IsEvictable(test.pod, test.evictLocalStoragePods)
		if result != test.result {
			t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pod.Name, test.result, result)
		}
	}
}

// The other side of this hunk is a loop exercising ListPodsOnANode:
	for _, testCase := range testCases {
		fakeClient := &fake.Clientset{}
		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
			list := action.(core.ListAction)
			fieldString := list.GetListRestrictions().Fields.String()
			if strings.Contains(fieldString, "n1") {
				return true, &v1.PodList{Items: testCase.pods["n1"]}, nil
			} else if strings.Contains(fieldString, "n2") {
				return true, &v1.PodList{Items: testCase.pods["n2"]}, nil
			}
			return true, nil, fmt.Errorf("Failed to list: %v", list)
		})
		pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node)
		if len(pods) != testCase.expectedPodCount {
			t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
		}
	}
}
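The table and loop above exercise IsEvictable across DaemonSet-owned, mirror, critical and local-storage pods, with the "descheduler.alpha.kubernetes.io/evict" annotation acting as an override. A minimal, self-contained sketch of that decision order (the podInfo type and isEvictable helper are illustrative placeholders, not the project's API):

package main

import "fmt"

type podInfo struct {
	daemonSet, mirror, critical, localStorage bool
	annotatedEvict                            bool
}

// isEvictable mirrors the ordering the test cases imply: the evict annotation
// wins, otherwise protected pods stay, and local storage only blocks eviction
// when evictLocalStoragePods is false.
func isEvictable(p podInfo, evictLocalStoragePods bool) bool {
	if p.annotatedEvict {
		return true
	}
	if p.daemonSet || p.mirror || p.critical {
		return false
	}
	if p.localStorage && !evictLocalStoragePods {
		return false
	}
	return true
}

func main() {
	fmt.Println(isEvictable(podInfo{localStorage: true}, false))                    // false, matches the p5 case
	fmt.Println(isEvictable(podInfo{localStorage: true}, true))                     // true, matches the p6 case
	fmt.Println(isEvictable(podInfo{daemonSet: true, annotatedEvict: true}, false)) // true, matches the p9 case
}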
// TestPodTypes (one side of this hunk):
func TestPodTypes(t *testing.T) {
	n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)

	// These won't be evicted.
	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, nil)
	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, nil)
	p4 := test.BuildTestPod("p4", 400, 0, n1.Name, nil)
	p5 := test.BuildTestPod("p5", 400, 0, n1.Name, nil)
	p6 := test.BuildTestPod("p6", 400, 0, n1.Name, nil)

	p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
	p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
	// The following 4 pods won't get evicted.
	// A daemonset.
	//p2.Annotations = test.GetDaemonSetAnnotation()
	p2.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
	// A pod with local storage.
	p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
	p3.Spec.Volumes = []v1.Volume{
		{
			Name: "sample",
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
				EmptyDir: &v1.EmptyDirVolumeSource{
					SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
			},
		},
	}
	// A Mirror Pod.
	p4.Annotations = test.GetMirrorPodAnnotation()
	// A Critical Pod.
	p5.Namespace = "kube-system"
	priority := utils.SystemCriticalPriority
	p5.Spec.Priority = &priority
	// (the other side of this hunk names the variable systemCriticalPriority)
	systemCriticalPriority := utils.SystemCriticalPriority
	p5.Spec.Priority = &systemCriticalPriority

	if !IsMirrorPod(p4) {
		t.Errorf("Expected p4 to be a mirror pod.")
	}
	if !IsCriticalPod(p5) {
		t.Errorf("Expected p5 to be a critical pod.")
	}
	if !IsPodWithLocalStorage(p3) {
		t.Errorf("Expected p3 to be a pod with local storage.")
	}
	ownerRefList := OwnerRef(p2)
	if !IsDaemonsetPod(ownerRefList) {
		t.Errorf("Expected p2 to be a daemonset pod.")
	}
	ownerRefList = OwnerRef(p1)
	if IsDaemonsetPod(ownerRefList) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
		t.Errorf("Expected p1 to be a normal pod.")
	}
}

// TestSortPodsBasedOnPriorityLowToHigh (the other side of this hunk):
func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
	n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)

	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
		test.SetPodPriority(pod, lowPriority)
	})

	// BestEffort
	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
		test.SetPodPriority(pod, highPriority)
		test.MakeBestEffortPod(pod)
	})

	// Burstable
	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
		test.SetPodPriority(pod, highPriority)
		test.MakeBurstablePod(pod)
	})

	// Guaranteed
	p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
		test.SetPodPriority(pod, highPriority)
		test.MakeGuaranteedPod(pod)
	})

	// Best effort with nil priorities.
	p5 := test.BuildTestPod("p5", 400, 100, n1.Name, test.MakeBestEffortPod)
	p5.Spec.Priority = nil

	p6 := test.BuildTestPod("p6", 400, 100, n1.Name, test.MakeGuaranteedPod)
	p6.Spec.Priority = nil

	podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}

	SortPodsBasedOnPriorityLowToHigh(podList)
	if !reflect.DeepEqual(podList[len(podList)-1], p4) {
		t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
	}
}
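TestSortPodsBasedOnPriorityLowToHigh above only asserts that the highest-priority guaranteed pod ends up last. A rough sketch of such an ordering, assuming priority is the primary sort key and that pods with a nil priority sort first; QoS tie-breaking is omitted, and sortLowToHigh and priorityOf are illustrative names, not the project's implementation:

package main

import (
	"fmt"
	"sort"

	v1 "k8s.io/api/core/v1"
)

// priorityOf treats a missing priority as lower than any explicit value,
// so pods without a priority are considered cheapest to evict.
func priorityOf(p *v1.Pod) int32 {
	if p.Spec.Priority == nil {
		return -1 // hypothetical sentinel for "no priority set"
	}
	return *p.Spec.Priority
}

// sortLowToHigh orders pods so the cheapest candidates come first.
func sortLowToHigh(pods []*v1.Pod) {
	sort.Slice(pods, func(i, j int) bool {
		return priorityOf(pods[i]) < priorityOf(pods[j])
	})
}

func main() {
	low, high := int32(10), int32(1000)
	pods := []*v1.Pod{
		{Spec: v1.PodSpec{Priority: &high}},
		{Spec: v1.PodSpec{}},
		{Spec: v1.PodSpec{Priority: &low}},
	}
	sortLowToHigh(pods)
	for _, p := range pods {
		fmt.Println(priorityOf(p)) // prints -1, 10, 1000: nil priority first, highest priority last
	}
}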
|||||||
@@ -21,7 +21,7 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/klog"
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||||
@@ -30,7 +30,7 @@ import (
|
|||||||
|
|
||||||
func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
|
func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
|
||||||
if policyConfigFile == "" {
|
if policyConfigFile == "" {
|
||||||
klog.V(1).Infof("policy config file not specified")
|
klog.V(1).InfoS("Policy config file not specified")
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
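This hunk, like several below, swaps klog's format-string calls for the structured variants in k8s.io/klog/v2. A small illustration of the two styles; the surrounding function and its values are made up for the example:

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	nodeName, evicted := "n1", 2

	// format-string style (being removed in these hunks)
	klog.V(1).Infof("evicted %d pods from node %s", evicted, nodeName)

	// structured style from klog/v2: a constant message plus key/value pairs
	klog.V(1).InfoS("Evicted pods", "node", nodeName, "evictedPods", evicted)
	klog.ErrorS(errors.New("example failure"), "Eviction failed", "node", nodeName)

	klog.Flush()
}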
|
||||||
|
|
||||||
|
|||||||
@@ -19,9 +19,22 @@ package scheme
|
|||||||
import (
|
import (
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||||
|
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||||
|
componentconfigv1alpha1 "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
Scheme = runtime.NewScheme()
|
Scheme = runtime.NewScheme()
|
||||||
Codecs = serializer.NewCodecFactory(Scheme)
|
Codecs = serializer.NewCodecFactory(Scheme)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
utilruntime.Must(api.AddToScheme(Scheme))
|
||||||
|
utilruntime.Must(v1alpha1.AddToScheme(Scheme))
|
||||||
|
|
||||||
|
utilruntime.Must(componentconfig.AddToScheme(Scheme))
|
||||||
|
utilruntime.Must(componentconfigv1alpha1.AddToScheme(Scheme))
|
||||||
|
}
|
||||||
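The init() above registers the internal and v1alpha1 descheduler APIs plus the componentconfig types on one scheme, so a single CodecFactory can decode any supported version into the internal representation. A hedged sketch of how such a factory is typically used to decode a policy; the wiring below is illustrative and the project's own loader may differ:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

var (
	scheme = runtime.NewScheme()
	codecs = serializer.NewCodecFactory(scheme)
)

func init() {
	// utilruntime.Must panics on error, surfacing a broken registration at start-up.
	utilruntime.Must(api.AddToScheme(scheme))
	utilruntime.Must(v1alpha1.AddToScheme(scheme))
}

// decodePolicy converts whatever registered version is in data into the internal type.
func decodePolicy(data []byte) (*api.DeschedulerPolicy, error) {
	obj, err := runtime.Decode(codecs.UniversalDecoder(), data)
	if err != nil {
		return nil, err
	}
	policy, ok := obj.(*api.DeschedulerPolicy)
	if !ok {
		return nil, fmt.Errorf("unexpected object type %T", obj)
	}
	return policy, nil
}

func main() {
	_ = decodePolicy // reading the policy file is out of scope for this sketch
	fmt.Println("scheme registered")
}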
|
|||||||
@@ -18,6 +18,8 @@ package strategies
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
"reflect"
|
"reflect"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -26,13 +28,34 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/klog"
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func validateRemoveDuplicatePodsParams(params *api.StrategyParameters) error {
|
||||||
|
if params == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// At most one of include/exclude can be set
|
||||||
|
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
|
||||||
|
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
||||||
|
}
|
||||||
|
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
|
||||||
|
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type podOwner struct {
|
||||||
|
namespace, kind, name string
|
||||||
|
imagesHash string
|
||||||
|
}
|
||||||
|
|
||||||
// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
|
// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
|
||||||
// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
|
// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
|
||||||
// namespace, and have at least one container with the same image.
|
// namespace, and have at least one container with the same image.
|
||||||
@@ -42,81 +65,145 @@ func RemoveDuplicatePods(
|
|||||||
client clientset.Interface,
|
client clientset.Interface,
|
||||||
strategy api.DeschedulerStrategy,
|
strategy api.DeschedulerStrategy,
|
||||||
nodes []*v1.Node,
|
nodes []*v1.Node,
|
||||||
evictLocalStoragePods bool,
|
|
||||||
podEvictor *evictions.PodEvictor,
|
podEvictor *evictions.PodEvictor,
|
||||||
) {
|
) {
|
||||||
for _, node := range nodes {
|
if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
|
||||||
klog.V(1).Infof("Processing node: %#v", node.Name)
|
klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
|
||||||
duplicatePods := listDuplicatePodsOnANode(ctx, client, node, strategy, evictLocalStoragePods)
|
return
|
||||||
for _, pod := range duplicatePods {
|
|
||||||
if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
|
|
||||||
klog.Errorf("Error evicting pod: (%#v)", err)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
|
||||||
|
|
||||||
// listDuplicatePodsOnANode lists duplicate pods on a given node.
|
|
||||||
// It checks for pods which have the same owner and have at least 1 container with the same image spec
|
|
||||||
func listDuplicatePodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, strategy api.DeschedulerStrategy, evictLocalStoragePods bool) []*v1.Pod {
|
|
||||||
pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
duplicatePods := make([]*v1.Pod, 0, len(pods))
|
var includedNamespaces, excludedNamespaces []string
|
||||||
// Each pod has a list of owners and a list of containers, and each container has 1 image spec.
|
if strategy.Params != nil && strategy.Params.Namespaces != nil {
|
||||||
// For each pod, we go through all the OwnerRef/Image mappings and represent them as a "key" string.
|
includedNamespaces = strategy.Params.Namespaces.Include
|
||||||
// All of those mappings together makes a list of "key" strings that essentially represent that pod's uniqueness.
|
excludedNamespaces = strategy.Params.Namespaces.Exclude
|
||||||
// This list of keys representing a single pod is then sorted alphabetically.
|
}
|
||||||
// If any other pod has a list that matches that pod's list, those pods are undeniably duplicates for the following reasons:
|
|
||||||
// - The 2 pods have the exact same ownerrefs
|
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
|
||||||
// - The 2 pods have the exact same container images
|
|
||||||
//
|
duplicatePods := make(map[podOwner]map[string][]*v1.Pod)
|
||||||
// duplicateKeysMap maps the first Namespace/Kind/Name/Image in a pod's list to a 2D-slice of all the other lists where that is the first key
|
ownerKeyOccurence := make(map[podOwner]int32)
|
||||||
// (Since we sort each pod's list, we only need to key the map on the first entry in each list. Any pod that doesn't have
|
nodeCount := 0
|
||||||
// the same first entry is clearly not a duplicate. This makes lookup quick and minimizes storage needed).
|
nodeMap := make(map[string]*v1.Node)
|
||||||
// If any of the existing lists for that first key matches the current pod's list, the current pod is a duplicate.
|
|
||||||
// If not, then we add this pod's list to the list of lists for that key.
|
for _, node := range nodes {
|
||||||
duplicateKeysMap := map[string][][]string{}
|
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||||
for _, pod := range pods {
|
pods, err := podutil.ListPodsOnANode(ctx,
|
||||||
ownerRefList := podutil.OwnerRef(pod)
|
client,
|
||||||
if hasExcludedOwnerRefKind(ownerRefList, strategy) {
|
node,
|
||||||
|
podutil.WithFilter(evictable.IsEvictable),
|
||||||
|
podutil.WithNamespaces(includedNamespaces),
|
||||||
|
podutil.WithoutNamespaces(excludedNamespaces),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
podContainerKeys := make([]string, 0, len(ownerRefList)*len(pod.Spec.Containers))
|
nodeMap[node.Name] = node
|
||||||
for _, ownerRef := range ownerRefList {
|
nodeCount++
|
||||||
|
// Each pod has a list of owners and a list of containers, and each container has 1 image spec.
|
||||||
|
// For each pod, we go through all the OwnerRef/Image mappings and represent them as a "key" string.
|
||||||
|
// All of those mappings together makes a list of "key" strings that essentially represent that pod's uniqueness.
|
||||||
|
// This list of keys representing a single pod is then sorted alphabetically.
|
||||||
|
// If any other pod has a list that matches that pod's list, those pods are undeniably duplicates for the following reasons:
|
||||||
|
// - The 2 pods have the exact same ownerrefs
|
||||||
|
// - The 2 pods have the exact same container images
|
||||||
|
//
|
||||||
|
// duplicateKeysMap maps the first Namespace/Kind/Name/Image in a pod's list to a 2D-slice of all the other lists where that is the first key
|
||||||
|
// (Since we sort each pod's list, we only need to key the map on the first entry in each list. Any pod that doesn't have
|
||||||
|
// the same first entry is clearly not a duplicate. This makes lookup quick and minimizes storage needed).
|
||||||
|
// If any of the existing lists for that first key matches the current pod's list, the current pod is a duplicate.
|
||||||
|
// If not, then we add this pod's list to the list of lists for that key.
|
||||||
|
duplicateKeysMap := map[string][][]string{}
|
||||||
|
for _, pod := range pods {
|
||||||
|
ownerRefList := podutil.OwnerRef(pod)
|
||||||
|
if hasExcludedOwnerRefKind(ownerRefList, strategy) || len(ownerRefList) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
podContainerKeys := make([]string, 0, len(ownerRefList)*len(pod.Spec.Containers))
|
||||||
|
imageList := []string{}
|
||||||
for _, container := range pod.Spec.Containers {
|
for _, container := range pod.Spec.Containers {
|
||||||
// Namespace/Kind/Name should be unique for the cluster.
|
imageList = append(imageList, container.Image)
|
||||||
// We also consider the image, as 2 pods could have the same owner but serve different purposes
|
|
||||||
// So any non-unique Namespace/Kind/Name/Image pattern is a duplicate pod.
|
|
||||||
s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name, container.Image}, "/")
|
|
||||||
podContainerKeys = append(podContainerKeys, s)
|
|
||||||
}
|
}
|
||||||
}
|
sort.Strings(imageList)
|
||||||
sort.Strings(podContainerKeys)
|
imagesHash := strings.Join(imageList, "#")
|
||||||
|
for _, ownerRef := range ownerRefList {
|
||||||
// If there have been any other pods with the same first "key", look through all the lists to see if any match
|
ownerKey := podOwner{
|
||||||
if existing, ok := duplicateKeysMap[podContainerKeys[0]]; ok {
|
namespace: pod.ObjectMeta.Namespace,
|
||||||
for _, keys := range existing {
|
kind: ownerRef.Kind,
|
||||||
if reflect.DeepEqual(keys, podContainerKeys) {
|
name: ownerRef.Name,
|
||||||
duplicatePods = append(duplicatePods, pod)
|
imagesHash: imagesHash,
|
||||||
break
|
}
|
||||||
|
ownerKeyOccurence[ownerKey] = ownerKeyOccurence[ownerKey] + 1
|
||||||
|
for _, image := range imageList {
|
||||||
|
// Namespace/Kind/Name should be unique for the cluster.
|
||||||
|
// We also consider the image, as 2 pods could have the same owner but serve different purposes
|
||||||
|
// So any non-unique Namespace/Kind/Name/Image pattern is a duplicate pod.
|
||||||
|
s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name, image}, "/")
|
||||||
|
podContainerKeys = append(podContainerKeys, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Strings(podContainerKeys)
|
||||||
|
|
||||||
|
// If there have been any other pods with the same first "key", look through all the lists to see if any match
|
||||||
|
if existing, ok := duplicateKeysMap[podContainerKeys[0]]; ok {
|
||||||
|
matched := false
|
||||||
|
for _, keys := range existing {
|
||||||
|
if reflect.DeepEqual(keys, podContainerKeys) {
|
||||||
|
matched = true
|
||||||
|
klog.V(3).InfoS("Duplicate found", "pod", klog.KObj(pod))
|
||||||
|
for _, ownerRef := range ownerRefList {
|
||||||
|
ownerKey := podOwner{
|
||||||
|
namespace: pod.ObjectMeta.Namespace,
|
||||||
|
kind: ownerRef.Kind,
|
||||||
|
name: ownerRef.Name,
|
||||||
|
imagesHash: imagesHash,
|
||||||
|
}
|
||||||
|
if _, ok := duplicatePods[ownerKey]; !ok {
|
||||||
|
duplicatePods[ownerKey] = make(map[string][]*v1.Pod)
|
||||||
|
}
|
||||||
|
duplicatePods[ownerKey][node.Name] = append(duplicatePods[ownerKey][node.Name], pod)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !matched {
|
||||||
|
// Found no matches, add this list of keys to the list of lists that have the same first key
|
||||||
|
duplicateKeysMap[podContainerKeys[0]] = append(duplicateKeysMap[podContainerKeys[0]], podContainerKeys)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// This is the first pod we've seen that has this first "key" entry
|
||||||
|
duplicateKeysMap[podContainerKeys[0]] = [][]string{podContainerKeys}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1. how many pods can be evicted to respect uniform placement of pods among viable nodes?
|
||||||
|
for ownerKey, nodes := range duplicatePods {
|
||||||
|
upperAvg := int(math.Ceil(float64(ownerKeyOccurence[ownerKey]) / float64(nodeCount)))
|
||||||
|
for nodeName, pods := range nodes {
|
||||||
|
klog.V(2).InfoS("Average occurrence per node", "node", klog.KObj(nodeMap[nodeName]), "ownerKey", ownerKey, "avg", upperAvg)
|
||||||
|
// list of duplicated pods does not contain the original referential pod
|
||||||
|
if len(pods)+1 > upperAvg {
|
||||||
|
// It's assumed all duplicated pods are in the same priority class
|
||||||
|
// TODO(jchaloup): check if the pod has a different node to lend to
|
||||||
|
for _, pod := range pods[upperAvg-1:] {
|
||||||
|
if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[nodeName], "RemoveDuplicatePods"); err != nil {
|
||||||
|
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// Found no matches, add this list of keys to the list of lists that have the same first key
|
|
||||||
duplicateKeysMap[podContainerKeys[0]] = append(duplicateKeysMap[podContainerKeys[0]], podContainerKeys)
|
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
// This is the first pod we've seen that has this first "key" entry
|
|
||||||
duplicateKeysMap[podContainerKeys[0]] = [][]string{podContainerKeys}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return duplicatePods
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasExcludedOwnerRefKind(ownerRefs []metav1.OwnerReference, strategy api.DeschedulerStrategy) bool {
|
func hasExcludedOwnerRefKind(ownerRefs []metav1.OwnerReference, strategy api.DeschedulerStrategy) bool {
|
||||||
if strategy.Params.RemoveDuplicates == nil {
|
if strategy.Params == nil || strategy.Params.RemoveDuplicates == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
exclude := sets.NewString(strategy.Params.RemoveDuplicates.ExcludeOwnerKinds...)
|
exclude := sets.NewString(strategy.Params.RemoveDuplicates.ExcludeOwnerKinds...)
|
||||||
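The rewritten RemoveDuplicatePods above represents each pod as a sorted list of "namespace/ownerKind/ownerName/image" keys and treats pods on the same node with identical lists as duplicates. A stripped-down sketch of that keying idea; the ref type and podKeys helper are placeholders, not the project's code:

package main

import (
	"fmt"
	"reflect"
	"sort"
	"strings"
)

type ref struct{ kind, name string }

// podKeys builds the sorted owner/image key list for one pod.
func podKeys(namespace string, owners []ref, images []string) []string {
	keys := make([]string, 0, len(owners)*len(images))
	for _, o := range owners {
		for _, img := range images {
			keys = append(keys, strings.Join([]string{namespace, o.kind, o.name, img}, "/"))
		}
	}
	sort.Strings(keys)
	return keys
}

func main() {
	a := podKeys("dev", []ref{{"ReplicaSet", "rs-1"}}, []string{"nginx:1.19"})
	b := podKeys("dev", []ref{{"ReplicaSet", "rs-1"}}, []string{"nginx:1.19"})
	c := podKeys("dev", []ref{{"ReplicaSet", "rs-1"}}, []string{"nginx:1.20"})

	fmt.Println(reflect.DeepEqual(a, b)) // true: b duplicates a
	fmt.Println(reflect.DeepEqual(a, c)) // false: different image, not a duplicate
}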
|
|||||||
@@ -20,8 +20,9 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/client-go/kubernetes/fake"
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
core "k8s.io/client-go/testing"
|
core "k8s.io/client-go/testing"
|
||||||
@@ -31,34 +32,45 @@ import (
|
|||||||
"sigs.k8s.io/descheduler/test"
|
"sigs.k8s.io/descheduler/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func buildTestPodWithImage(podName, node, image string) *v1.Pod {
|
||||||
|
pod := test.BuildTestPod(podName, 100, 0, node, test.SetRSOwnerRef)
|
||||||
|
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
|
||||||
|
Name: image,
|
||||||
|
Image: image,
|
||||||
|
})
|
||||||
|
return pod
|
||||||
|
}
|
||||||
|
|
||||||
func TestFindDuplicatePods(t *testing.T) {
|
func TestFindDuplicatePods(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
// first setup pods
|
// first setup pods
|
||||||
node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||||
p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
|
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||||
|
|
||||||
|
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||||
p1.Namespace = "dev"
|
p1.Namespace = "dev"
|
||||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
|
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||||
p2.Namespace = "dev"
|
p2.Namespace = "dev"
|
||||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
|
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
|
||||||
p3.Namespace = "dev"
|
p3.Namespace = "dev"
|
||||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
|
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
|
||||||
p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
|
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
|
||||||
p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
|
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
|
||||||
p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
|
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
|
||||||
p7.Namespace = "kube-system"
|
p7.Namespace = "kube-system"
|
||||||
p8 := test.BuildTestPod("p8", 100, 0, node.Name, nil)
|
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
|
||||||
p8.Namespace = "test"
|
p8.Namespace = "test"
|
||||||
p9 := test.BuildTestPod("p9", 100, 0, node.Name, nil)
|
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
|
||||||
p9.Namespace = "test"
|
p9.Namespace = "test"
|
||||||
p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil)
|
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
|
||||||
p10.Namespace = "test"
|
p10.Namespace = "test"
|
||||||
p11 := test.BuildTestPod("p11", 100, 0, node.Name, nil)
|
p11 := test.BuildTestPod("p11", 100, 0, node1.Name, nil)
|
||||||
p11.Namespace = "different-images"
|
p11.Namespace = "different-images"
|
||||||
p12 := test.BuildTestPod("p12", 100, 0, node.Name, nil)
|
p12 := test.BuildTestPod("p12", 100, 0, node1.Name, nil)
|
||||||
p12.Namespace = "different-images"
|
p12.Namespace = "different-images"
|
||||||
p13 := test.BuildTestPod("p13", 100, 0, node.Name, nil)
|
p13 := test.BuildTestPod("p13", 100, 0, node1.Name, nil)
|
||||||
p13.Namespace = "different-images"
|
p13.Namespace = "different-images"
|
||||||
p14 := test.BuildTestPod("p14", 100, 0, node.Name, nil)
|
p14 := test.BuildTestPod("p14", 100, 0, node1.Name, nil)
|
||||||
p14.Namespace = "different-images"
|
p14.Namespace = "different-images"
|
||||||
|
|
||||||
// ### Evictable Pods ###
|
// ### Evictable Pods ###
|
||||||
@@ -115,70 +127,70 @@ func TestFindDuplicatePods(t *testing.T) {
|
|||||||
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
description string
|
description string
|
||||||
maxPodsToEvict int
|
maxPodsToEvictPerNode int
|
||||||
pods []v1.Pod
|
pods []v1.Pod
|
||||||
expectedEvictedPodCount int
|
expectedEvictedPodCount int
|
||||||
strategy api.DeschedulerStrategy
|
strategy api.DeschedulerStrategy
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 2 should be evicted.",
|
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
|
||||||
maxPodsToEvict: 5,
|
maxPodsToEvictPerNode: 5,
|
||||||
pods: []v1.Pod{*p1, *p2, *p3},
|
pods: []v1.Pod{*p1, *p2, *p3},
|
||||||
expectedEvictedPodCount: 2,
|
expectedEvictedPodCount: 1,
|
||||||
strategy: api.DeschedulerStrategy{},
|
strategy: api.DeschedulerStrategy{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
|
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
|
||||||
maxPodsToEvict: 5,
|
maxPodsToEvictPerNode: 5,
|
||||||
pods: []v1.Pod{*p1, *p2, *p3},
|
pods: []v1.Pod{*p1, *p2, *p3},
|
||||||
expectedEvictedPodCount: 0,
|
expectedEvictedPodCount: 0,
|
||||||
strategy: api.DeschedulerStrategy{Params: api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
|
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 2 should be evicted.",
|
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
|
||||||
maxPodsToEvict: 5,
|
maxPodsToEvictPerNode: 5,
|
||||||
pods: []v1.Pod{*p8, *p9, *p10},
|
pods: []v1.Pod{*p8, *p9, *p10},
|
||||||
expectedEvictedPodCount: 2,
|
expectedEvictedPodCount: 1,
|
||||||
strategy: api.DeschedulerStrategy{},
|
strategy: api.DeschedulerStrategy{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
|
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
|
||||||
maxPodsToEvict: 5,
|
maxPodsToEvictPerNode: 5,
|
||||||
pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
|
pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
|
||||||
expectedEvictedPodCount: 4,
|
expectedEvictedPodCount: 2,
|
||||||
strategy: api.DeschedulerStrategy{},
|
strategy: api.DeschedulerStrategy{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
|
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
|
||||||
maxPodsToEvict: 2,
|
maxPodsToEvictPerNode: 2,
|
||||||
pods: []v1.Pod{*p4, *p5, *p6, *p7},
|
pods: []v1.Pod{*p4, *p5, *p6, *p7},
|
||||||
expectedEvictedPodCount: 0,
|
expectedEvictedPodCount: 0,
|
||||||
strategy: api.DeschedulerStrategy{},
|
strategy: api.DeschedulerStrategy{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "Test all Pods: 4 should be evicted.",
|
description: "Test all Pods: 4 should be evicted.",
|
||||||
maxPodsToEvict: 5,
|
maxPodsToEvictPerNode: 5,
|
||||||
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
|
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
|
||||||
expectedEvictedPodCount: 4,
|
expectedEvictedPodCount: 2,
|
||||||
strategy: api.DeschedulerStrategy{},
|
strategy: api.DeschedulerStrategy{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "Pods with the same owner but different images should not be evicted",
|
description: "Pods with the same owner but different images should not be evicted",
|
||||||
maxPodsToEvict: 5,
|
maxPodsToEvictPerNode: 5,
|
||||||
pods: []v1.Pod{*p11, *p12},
|
pods: []v1.Pod{*p11, *p12},
|
||||||
expectedEvictedPodCount: 0,
|
expectedEvictedPodCount: 0,
|
||||||
strategy: api.DeschedulerStrategy{},
|
strategy: api.DeschedulerStrategy{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "Pods with multiple containers should not match themselves",
|
description: "Pods with multiple containers should not match themselves",
|
||||||
maxPodsToEvict: 5,
|
maxPodsToEvictPerNode: 5,
|
||||||
pods: []v1.Pod{*p13},
|
pods: []v1.Pod{*p13},
|
||||||
expectedEvictedPodCount: 0,
|
expectedEvictedPodCount: 0,
|
||||||
strategy: api.DeschedulerStrategy{},
|
strategy: api.DeschedulerStrategy{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
|
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
|
||||||
maxPodsToEvict: 5,
|
maxPodsToEvictPerNode: 5,
|
||||||
pods: []v1.Pod{*p11, *p13},
|
pods: []v1.Pod{*p11, *p13},
|
||||||
expectedEvictedPodCount: 0,
|
expectedEvictedPodCount: 0,
|
||||||
strategy: api.DeschedulerStrategy{},
|
strategy: api.DeschedulerStrategy{},
|
||||||
@@ -186,26 +198,220 @@ func TestFindDuplicatePods(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, testCase := range testCases {
|
for _, testCase := range testCases {
|
||||||
fakeClient := &fake.Clientset{}
|
t.Run(testCase.description, func(t *testing.T) {
|
||||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
fakeClient := &fake.Clientset{}
|
||||||
return true, &v1.PodList{Items: testCase.pods}, nil
|
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
})
|
return true, &v1.PodList{Items: testCase.pods}, nil
|
||||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
})
|
||||||
return true, node, nil
|
podEvictor := evictions.NewPodEvictor(
|
||||||
})
|
fakeClient,
|
||||||
podEvictor := evictions.NewPodEvictor(
|
"v1",
|
||||||
fakeClient,
|
false,
|
||||||
"v1",
|
testCase.maxPodsToEvictPerNode,
|
||||||
false,
|
[]*v1.Node{node1, node2},
|
||||||
testCase.maxPodsToEvict,
|
false,
|
||||||
[]*v1.Node{node},
|
)
|
||||||
)
|
|
||||||
|
|
||||||
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, []*v1.Node{node}, false, podEvictor)
|
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, []*v1.Node{node1, node2}, podEvictor)
|
||||||
podsEvicted := podEvictor.TotalEvicted()
|
podsEvicted := podEvictor.TotalEvicted()
|
||||||
if podsEvicted != testCase.expectedEvictedPodCount {
|
if podsEvicted != testCase.expectedEvictedPodCount {
|
||||||
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
|
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
setRSOwnerRef2 := func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
|
||||||
|
{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-2"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
setTwoRSOwnerRef := func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
|
||||||
|
{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-1"},
|
||||||
|
{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-2"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
description string
|
||||||
|
maxPodsToEvictPerNode int
|
||||||
|
pods []v1.Pod
|
||||||
|
nodes []*v1.Node
|
||||||
|
expectedEvictedPodCount int
|
||||||
|
strategy api.DeschedulerStrategy
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
description: "Evict pods uniformly",
|
||||||
|
pods: []v1.Pod{
|
||||||
|
// (5,3,1) -> (3,3,3) -> 2 evictions
|
||||||
|
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
|
||||||
|
},
|
||||||
|
expectedEvictedPodCount: 2,
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n2", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n3", 2000, 3000, 10, nil),
|
||||||
|
},
|
||||||
|
strategy: api.DeschedulerStrategy{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Evict pods uniformly with one node left out",
|
||||||
|
pods: []v1.Pod{
|
||||||
|
// (5,3,1) -> (4,4,1) -> 1 eviction
|
||||||
|
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
|
||||||
|
},
|
||||||
|
expectedEvictedPodCount: 1,
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n2", 2000, 3000, 10, nil),
|
||||||
|
},
|
||||||
|
strategy: api.DeschedulerStrategy{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Evict pods uniformly with two replica sets",
|
||||||
|
pods: []v1.Pod{
|
||||||
|
// (5,3,1) -> (3,3,3) -> 2 evictions
|
||||||
|
*test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef),
|
||||||
|
},
|
||||||
|
expectedEvictedPodCount: 4,
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n2", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n3", 2000, 3000, 10, nil),
|
||||||
|
},
|
||||||
|
strategy: api.DeschedulerStrategy{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Evict pods uniformly with two owner references",
|
||||||
|
pods: []v1.Pod{
|
||||||
|
// (5,3,1) -> (3,3,3) -> 2 evictions
|
||||||
|
*test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef),
|
||||||
|
// (1,3,5) -> (3,3,3) -> 2 evictions
|
||||||
|
*test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2),
|
||||||
|
*test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2),
|
||||||
|
*test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2),
|
||||||
|
*test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2),
|
||||||
|
*test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2),
|
||||||
|
*test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2),
|
||||||
|
*test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2),
|
||||||
|
*test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2),
|
||||||
|
*test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2),
|
||||||
|
},
|
||||||
|
expectedEvictedPodCount: 4,
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n2", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n3", 2000, 3000, 10, nil),
|
||||||
|
},
|
||||||
|
strategy: api.DeschedulerStrategy{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Evict pods with number of pods less than nodes",
|
||||||
|
pods: []v1.Pod{
|
||||||
|
// (2,0,0) -> (1,1,0) -> 1 eviction
|
||||||
|
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
},
|
||||||
|
expectedEvictedPodCount: 1,
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n2", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n3", 2000, 3000, 10, nil),
|
||||||
|
},
|
||||||
|
strategy: api.DeschedulerStrategy{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Evict pods with number of pods less than nodes, but ignore different pods with the same ownerref",
|
||||||
|
pods: []v1.Pod{
|
||||||
|
// (1, 0, 0) for "bar","baz" images -> no eviction, even with a matching ownerKey
|
||||||
|
// (2, 0, 0) for "foo" image -> (1,1,0) - 1 eviction
|
||||||
|
// In this case the only "real" duplicates are p1 and p4, so one of those should be evicted
|
||||||
|
*buildTestPodWithImage("p1", "n1", "foo"),
|
||||||
|
*buildTestPodWithImage("p2", "n1", "bar"),
|
||||||
|
*buildTestPodWithImage("p3", "n1", "baz"),
|
||||||
|
*buildTestPodWithImage("p4", "n1", "foo"),
|
||||||
|
},
|
||||||
|
expectedEvictedPodCount: 1,
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n2", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n3", 2000, 3000, 10, nil),
|
||||||
|
},
|
||||||
|
strategy: api.DeschedulerStrategy{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "Evict pods with a single pod with three nodes",
|
||||||
|
pods: []v1.Pod{
|
||||||
|
// (2,0,0) -> (1,1,0) -> 1 eviction
|
||||||
|
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
|
||||||
|
},
|
||||||
|
expectedEvictedPodCount: 0,
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n2", 2000, 3000, 10, nil),
|
||||||
|
test.BuildTestNode("n3", 2000, 3000, 10, nil),
|
||||||
|
},
|
||||||
|
strategy: api.DeschedulerStrategy{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, testCase := range testCases {
|
||||||
|
t.Run(testCase.description, func(t *testing.T) {
|
||||||
|
fakeClient := &fake.Clientset{}
|
||||||
|
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
|
return true, &v1.PodList{Items: testCase.pods}, nil
|
||||||
|
})
|
||||||
|
podEvictor := evictions.NewPodEvictor(
|
||||||
|
fakeClient,
|
||||||
|
"v1",
|
||||||
|
false,
|
||||||
|
testCase.maxPodsToEvictPerNode,
|
||||||
|
testCase.nodes,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
|
||||||
|
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor)
|
||||||
|
podsEvicted := podEvictor.TotalEvicted()
|
||||||
|
if podsEvicted != testCase.expectedEvictedPodCount {
|
||||||
|
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
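The expected counts in TestRemoveDuplicatesUniformly follow from the strategy's balancing rule: upperAvg = ceil(total owner occurrences / number of processed nodes), and on each node only the duplicates beyond upperAvg-1 are evicted, the first matching pod per node being the reference rather than a duplicate. A small arithmetic sketch reproducing the (5,3,1) cases; evictionsPerNode is an illustrative helper, not the strategy's code:

package main

import (
	"fmt"
	"math"
)

// evictionsPerNode takes the per-node pod counts for one owner key on the
// processed nodes and returns how many evictions the balancing rule implies.
func evictionsPerNode(podsPerNode []int, nodeCount int) int {
	total := 0
	for _, n := range podsPerNode {
		total += n
	}
	upperAvg := int(math.Ceil(float64(total) / float64(nodeCount)))

	evicted := 0
	for _, n := range podsPerNode {
		duplicates := n - 1 // the first matching pod on a node is the reference
		if duplicates < 0 {
			duplicates = 0
		}
		if duplicates+1 > upperAvg {
			evicted += duplicates - (upperAvg - 1)
		}
	}
	return evicted
}

func main() {
	fmt.Println(evictionsPerNode([]int{5, 3, 1}, 3)) // 2: (5,3,1) -> (3,3,3)
	fmt.Println(evictionsPerNode([]int{5, 3}, 2))    // 1: with n3 left out, (5,3) -> (4,4)
}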
|
|||||||
@@ -18,12 +18,13 @@ package strategies
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/klog"
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||||
@@ -32,132 +33,249 @@ import (
|
|||||||
"sigs.k8s.io/descheduler/pkg/utils"
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
type NodeUsageMap struct {
|
// NodeUsage stores a node's info, pods on it, thresholds and its resource usage
|
||||||
|
type NodeUsage struct {
|
||||||
node *v1.Node
|
node *v1.Node
|
||||||
usage api.ResourceThresholds
|
usage map[v1.ResourceName]*resource.Quantity
|
||||||
allPods []*v1.Pod
|
allPods []*v1.Pod
|
||||||
|
|
||||||
|
lowResourceThreshold map[v1.ResourceName]*resource.Quantity
|
||||||
|
highResourceThreshold map[v1.ResourceName]*resource.Quantity
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NodePodsMap is a set of (node, pods) pairs
|
||||||
type NodePodsMap map[*v1.Node][]*v1.Pod
|
type NodePodsMap map[*v1.Node][]*v1.Pod
|
||||||
|
|
||||||
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
|
const (
|
||||||
if !strategy.Enabled {
|
// MinResourcePercentage is the minimum value of a resource's percentage
|
||||||
|
MinResourcePercentage = 0
|
||||||
|
// MaxResourcePercentage is the maximum value of a resource's percentage
|
||||||
|
MaxResourcePercentage = 100
|
||||||
|
)
|
||||||
|
|
||||||
|
func validateLowNodeUtilizationParams(params *api.StrategyParameters) error {
|
||||||
|
if params == nil || params.NodeResourceUtilizationThresholds == nil {
|
||||||
|
return fmt.Errorf("NodeResourceUtilizationThresholds not set")
|
||||||
|
}
|
||||||
|
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
|
||||||
|
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
|
||||||
|
// to calculate nodes' utilization and not the actual resource usage.
|
||||||
|
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
|
||||||
|
// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
|
||||||
|
if err := validateLowNodeUtilizationParams(strategy.Params); err != nil {
|
||||||
|
klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// todo: move to config validation?
|
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
|
||||||
// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
|
if err != nil {
|
||||||
if strategy.Params.NodeResourceUtilizationThresholds == nil {
|
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
|
||||||
klog.V(1).Infof("NodeResourceUtilizationThresholds not set")
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
|
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
|
||||||
if !validateThresholds(thresholds) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
|
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
|
||||||
if !validateTargetThresholds(targetThresholds) {
|
if err := validateStrategyConfig(thresholds, targetThresholds); err != nil {
|
||||||
|
klog.ErrorS(err, "LowNodeUtilization config is not valid")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// check if Pods/CPU/Mem are set, if not, set them to 100
|
||||||
|
if _, ok := thresholds[v1.ResourcePods]; !ok {
|
||||||
|
thresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||||
|
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||||
|
}
|
||||||
|
if _, ok := thresholds[v1.ResourceCPU]; !ok {
|
||||||
|
thresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||||
|
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||||
|
}
|
||||||
|
if _, ok := thresholds[v1.ResourceMemory]; !ok {
|
||||||
|
thresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||||
|
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||||
|
}
|
||||||
|
|
||||||
npm := createNodePodsMap(ctx, client, nodes)
|
lowNodes, targetNodes := classifyNodes(
|
||||||
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, evictLocalStoragePods)
|
getNodeUsage(ctx, client, nodes, thresholds, targetThresholds),
|
||||||
|
// The node has to be schedulable (to be able to move workload there)
|
||||||
|
func(node *v1.Node, usage NodeUsage) bool {
|
||||||
|
if nodeutil.IsNodeUnschedulable(node) {
|
||||||
|
klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return isNodeWithLowUtilization(usage)
|
||||||
|
},
|
||||||
|
func(node *v1.Node, usage NodeUsage) bool {
|
||||||
|
return isNodeAboveTargetUtilization(usage)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
|
klog.V(1).InfoS("Criteria for a node under utilization",
|
||||||
thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
|
"CPU", thresholds[v1.ResourceCPU], "Mem", thresholds[v1.ResourceMemory], "Pods", thresholds[v1.ResourcePods])
|
||||||
|
|
||||||
if len(lowNodes) == 0 {
|
if len(lowNodes) == 0 {
|
||||||
klog.V(1).Infof("No node is underutilized, nothing to do here, you might tune your thresholds further")
|
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
klog.V(1).Infof("Total number of underutilized nodes: %v", len(lowNodes))
|
klog.V(1).InfoS("Total number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||||
|
|
||||||
if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
|
if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
|
||||||
klog.V(1).Infof("number of nodes underutilized (%v) is less than NumberOfNodes (%v), nothing to do here", len(lowNodes), strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
|
klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(lowNodes) == len(nodes) {
|
if len(lowNodes) == len(nodes) {
|
||||||
klog.V(1).Infof("all nodes are underutilized, nothing to do here")
|
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(targetNodes) == 0 {
|
if len(targetNodes) == 0 {
|
||||||
klog.V(1).Infof("all nodes are under target utilization, nothing to do here")
|
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
klog.V(1).Infof("Criteria for a node above target utilization: CPU: %v, Mem: %v, Pods: %v",
|
klog.V(1).InfoS("Criteria for a node above target utilization",
|
||||||
targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
|
"CPU", targetThresholds[v1.ResourceCPU], "Mem", targetThresholds[v1.ResourceMemory], "Pods", targetThresholds[v1.ResourcePods])
|
||||||
klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
|
|
||||||
|
klog.V(1).InfoS("Number of nodes above target utilization", "totalNumber", len(targetNodes))
|
||||||
|
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
|
||||||
|
|
||||||
evictPodsFromTargetNodes(
|
evictPodsFromTargetNodes(
|
||||||
ctx,
|
ctx,
|
||||||
targetNodes,
|
targetNodes,
|
||||||
lowNodes,
|
lowNodes,
|
||||||
targetThresholds,
|
podEvictor,
|
||||||
evictLocalStoragePods,
|
evictable.IsEvictable)
|
||||||
podEvictor)
|
|
||||||
|
|
||||||
klog.V(1).Infof("Total number of pods evicted: %v", podEvictor.TotalEvicted())
|
klog.V(1).InfoS("Total number of pods evicted", "evictedPods", podEvictor.TotalEvicted())
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateThresholds(thresholds api.ResourceThresholds) bool {
|
// validateStrategyConfig checks if the strategy's config is valid
|
||||||
if thresholds == nil || len(thresholds) == 0 {
|
func validateStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error {
|
||||||
klog.V(1).Infof("no resource threshold is configured")
|
// validate thresholds and targetThresholds config
|
||||||
return false
|
if err := validateThresholds(thresholds); err != nil {
|
||||||
|
return fmt.Errorf("thresholds config is not valid: %v", err)
|
||||||
}
|
}
|
||||||
for name := range thresholds {
|
if err := validateThresholds(targetThresholds); err != nil {
|
||||||
switch name {
|
return fmt.Errorf("targetThresholds config is not valid: %v", err)
|
||||||
case v1.ResourceCPU:
|
}
|
||||||
continue
|
|
||||||
case v1.ResourceMemory:
|
// validate if thresholds and targetThresholds have same resources configured
|
||||||
continue
|
if len(thresholds) != len(targetThresholds) {
|
||||||
case v1.ResourcePods:
|
return fmt.Errorf("thresholds and targetThresholds configured different resources")
|
||||||
continue
|
}
|
||||||
default:
|
for resourceName, value := range thresholds {
|
||||||
klog.Errorf("only cpu, memory, or pods thresholds can be specified")
|
if targetValue, ok := targetThresholds[resourceName]; !ok {
|
||||||
return false
|
return fmt.Errorf("thresholds and targetThresholds configured different resources")
|
||||||
|
} else if value > targetValue {
|
||||||
|
return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//This function could be merged into above once we are clear.
|
// validateThresholds checks if thresholds have valid resource name and resource percentage configured
|
||||||
func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
|
func validateThresholds(thresholds api.ResourceThresholds) error {
|
||||||
if targetThresholds == nil {
|
if thresholds == nil || len(thresholds) == 0 {
|
||||||
klog.V(1).Infof("no target resource threshold is configured")
|
return fmt.Errorf("no resource threshold is configured")
|
||||||
return false
|
|
||||||
} else if _, ok := targetThresholds[v1.ResourcePods]; !ok {
|
|
||||||
klog.V(1).Infof("no target resource threshold for pods is configured")
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
return true
|
for name, percent := range thresholds {
|
||||||
|
switch name {
|
||||||
|
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
|
||||||
|
if percent < MinResourcePercentage || percent > MaxResourcePercentage {
|
||||||
|
return fmt.Errorf("%v threshold not in [%v, %v] range", name, MinResourcePercentage, MaxResourcePercentage)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("only cpu, memory, or pods thresholds can be specified")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getNodeUsage(
|
||||||
|
ctx context.Context,
|
||||||
|
client clientset.Interface,
|
||||||
|
nodes []*v1.Node,
|
||||||
|
lowThreshold, highThreshold api.ResourceThresholds,
|
||||||
|
) []NodeUsage {
|
||||||
|
nodeUsageList := []NodeUsage{}
|
||||||
|
|
||||||
|
for _, node := range nodes {
|
||||||
|
pods, err := podutil.ListPodsOnANode(ctx, client, node)
|
||||||
|
if err != nil {
|
||||||
|
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
nodeCapacity := node.Status.Capacity
|
||||||
|
if len(node.Status.Allocatable) > 0 {
|
||||||
|
nodeCapacity = node.Status.Allocatable
|
||||||
|
}
|
||||||
|
|
||||||
|
nodeUsageList = append(nodeUsageList, NodeUsage{
|
||||||
|
node: node,
|
||||||
|
usage: nodeUtilization(node, pods),
|
||||||
|
allPods: pods,
|
||||||
|
// A threshold is in percentages but in <0;100> interval.
|
||||||
|
// Performing `threshold * 0.01` will convert <0;100> interval into <0;1>.
|
||||||
|
// Multiplying it with capacity will give fraction of the capacity corresponding to the given high/low resource threshold in Quantity units.
|
||||||
|
lowResourceThreshold: map[v1.ResourceName]*resource.Quantity{
|
||||||
|
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(lowThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
|
||||||
|
v1.ResourceMemory: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
|
||||||
|
v1.ResourcePods: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
|
||||||
|
},
|
||||||
|
highResourceThreshold: map[v1.ResourceName]*resource.Quantity{
|
||||||
|
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(highThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
|
||||||
|
v1.ResourceMemory: resource.NewQuantity(int64(float64(highThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
|
||||||
|
v1.ResourcePods: resource.NewQuantity(int64(float64(highThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return nodeUsageList
|
||||||
|
}
|
||||||
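getNodeUsage above turns the percentage thresholds into absolute quantities by multiplying the threshold by 0.01 and by the node's allocatable capacity. A minimal sketch of that conversion for CPU; thresholdQuantity is an illustrative helper:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// thresholdQuantity converts a percentage in the <0;100> range into an
// absolute milli-CPU quantity for a node with the given allocatable capacity.
func thresholdQuantity(thresholdPercent float64, capacityMilliCPU int64) *resource.Quantity {
	return resource.NewMilliQuantity(int64(thresholdPercent*float64(capacityMilliCPU)*0.01), resource.DecimalSI)
}

func main() {
	// 20% of a node with 4 allocatable CPUs (4000m) is 800m.
	fmt.Println(thresholdQuantity(20, 4000).String()) // "800m"
}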
|
|
||||||
+func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
+  nodeCapacity := nodeUsage.node.Status.Capacity
+  if len(nodeUsage.node.Status.Allocatable) > 0 {
+    nodeCapacity = nodeUsage.node.Status.Allocatable
+  }
+
+  resourceUsagePercentage := map[v1.ResourceName]float64{}
+  for resourceName, resourceUsage := range nodeUsage.usage {
+    cap := nodeCapacity[resourceName]
+    if !cap.IsZero() {
+      resourceUsagePercentage[resourceName] = 100 * float64(resourceUsage.Value()) / float64(cap.Value())
+    }
+  }
+
+  return resourceUsagePercentage
+}
 // classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
 // low and high thresholds, it is simply ignored.
-func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds, evictLocalStoragePods bool) ([]NodeUsageMap, []NodeUsageMap) {
-  lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
-  for node, pods := range npm {
-    usage := nodeUtilization(node, pods, evictLocalStoragePods)
-    nuMap := NodeUsageMap{
-      node:    node,
-      usage:   usage,
-      allPods: pods,
-    }
-    // Check if node is underutilized and if we can schedule pods on it.
-    if !nodeutil.IsNodeUnschedulable(node) && IsNodeWithLowUtilization(usage, thresholds) {
-      klog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
-      lowNodes = append(lowNodes, nuMap)
-    } else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
-      klog.V(2).Infof("Node %#v is over utilized with usage: %#v", node.Name, usage)
-      targetNodes = append(targetNodes, nuMap)
-    } else {
-      klog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)
-    }
-  }
-  return lowNodes, targetNodes
-}
+func classifyNodes(
+  nodeUsages []NodeUsage,
+  lowThresholdFilter, highThresholdFilter func(node *v1.Node, usage NodeUsage) bool,
+) ([]NodeUsage, []NodeUsage) {
+  lowNodes, highNodes := []NodeUsage{}, []NodeUsage{}
+
+  for _, nodeUsage := range nodeUsages {
+    if lowThresholdFilter(nodeUsage.node, nodeUsage) {
+      klog.V(2).InfoS("Node is underutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
+      lowNodes = append(lowNodes, nodeUsage)
+    } else if highThresholdFilter(nodeUsage.node, nodeUsage) {
+      klog.V(2).InfoS("Node is overutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
+      highNodes = append(highNodes, nodeUsage)
+    } else {
+      klog.V(2).InfoS("Node is appropriately utilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
+    }
+  }
+
+  return lowNodes, highNodes
+}
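The classification predicates are now supplied by the caller. A minimal sketch of how they might be wired from the helpers defined later in this file, assuming the unschedulable-node check from the removed implementation still applies (the actual wiring in the strategy entry point is not part of this hunk):

    lowFilter := func(node *v1.Node, usage NodeUsage) bool {
      // Skip nodes we cannot schedule evicted pods onto, as the old code did inline.
      return !nodeutil.IsNodeUnschedulable(node) && isNodeWithLowUtilization(usage)
    }
    highFilter := func(node *v1.Node, usage NodeUsage) bool {
      return isNodeAboveTargetUtilization(usage)
    }
    lowNodes, highNodes := classifyNodes(nodeUsages, lowFilter, highFilter)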
 // evictPodsFromTargetNodes evicts pods based on priority, if all the pods on the node have priority, if not
@@ -165,124 +283,112 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
 // TODO: @ravig Break this function into smaller functions.
 func evictPodsFromTargetNodes(
   ctx context.Context,
-  targetNodes, lowNodes []NodeUsageMap,
-  targetThresholds api.ResourceThresholds,
-  evictLocalStoragePods bool,
+  targetNodes, lowNodes []NodeUsage,
   podEvictor *evictions.PodEvictor,
+  podFilter func(pod *v1.Pod) bool,
 ) {
 
-  SortNodesByUsage(targetNodes)
+  sortNodesByUsage(targetNodes)
 
   // upper bound on total number of pods/cpu/memory to be moved
-  var totalPods, totalCPU, totalMem float64
+  totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{
+    v1.ResourcePods:   {},
+    v1.ResourceCPU:    {},
+    v1.ResourceMemory: {},
+  }
 
   var taintsOfLowNodes = make(map[string][]v1.Taint, len(lowNodes))
   for _, node := range lowNodes {
     taintsOfLowNodes[node.node.Name] = node.node.Spec.Taints
-    nodeCapacity := node.node.Status.Capacity
-    if len(node.node.Status.Allocatable) > 0 {
-      nodeCapacity = node.node.Status.Allocatable
-    }
-    // totalPods to be moved
-    podsPercentage := targetThresholds[v1.ResourcePods] - node.usage[v1.ResourcePods]
-    totalPods += ((float64(podsPercentage) * float64(nodeCapacity.Pods().Value())) / 100)
-
-    // totalCPU capacity to be moved
-    if _, ok := targetThresholds[v1.ResourceCPU]; ok {
-      cpuPercentage := targetThresholds[v1.ResourceCPU] - node.usage[v1.ResourceCPU]
-      totalCPU += ((float64(cpuPercentage) * float64(nodeCapacity.Cpu().MilliValue())) / 100)
-    }
-
-    // totalMem capacity to be moved
-    if _, ok := targetThresholds[v1.ResourceMemory]; ok {
-      memPercentage := targetThresholds[v1.ResourceMemory] - node.usage[v1.ResourceMemory]
-      totalMem += ((float64(memPercentage) * float64(nodeCapacity.Memory().Value())) / 100)
-    }
+
+    for name := range totalAvailableUsage {
+      totalAvailableUsage[name].Add(*node.highResourceThreshold[name])
+      totalAvailableUsage[name].Sub(*node.usage[name])
+    }
   }
 
-  klog.V(1).Infof("Total capacity to be moved: CPU:%v, Mem:%v, Pods:%v", totalCPU, totalMem, totalPods)
-  klog.V(1).Infof("********Number of pods evicted from each node:***********")
+  klog.V(1).InfoS(
+    "Total capacity to be moved",
+    "CPU", totalAvailableUsage[v1.ResourceCPU].MilliValue(),
+    "Mem", totalAvailableUsage[v1.ResourceMemory].Value(),
+    "Pods", totalAvailableUsage[v1.ResourcePods].Value(),
+  )
 
   for _, node := range targetNodes {
-    nodeCapacity := node.node.Status.Capacity
-    if len(node.node.Status.Allocatable) > 0 {
-      nodeCapacity = node.node.Status.Allocatable
-    }
-    klog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
-
-    nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods := classifyPods(node.allPods, evictLocalStoragePods)
-    klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bestEffortPods:%v, burstablePods:%v, guaranteedPods:%v", len(node.allPods), len(nonRemovablePods), len(bestEffortPods), len(burstablePods), len(guaranteedPods))
-
-    // Check if one pod has priority, if yes, assume that all pods have priority and evict pods based on priority.
-    if node.allPods[0].Spec.Priority != nil {
-      klog.V(1).Infof("All pods have priority associated with them. Evicting pods based on priority")
-      evictablePods := make([]*v1.Pod, 0)
-      evictablePods = append(append(burstablePods, bestEffortPods...), guaranteedPods...)
-
-      // sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
-      sortPodsBasedOnPriority(evictablePods)
-      evictPods(ctx, evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
-    } else {
-      // TODO: Remove this when we support only priority.
-      // Falling back to evicting pods based on priority.
-      klog.V(1).Infof("Evicting pods based on QoS")
-      klog.V(1).Infof("There are %v non-evictable pods on the node", len(nonRemovablePods))
-      // evict best effort pods
-      evictPods(ctx, bestEffortPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
-      // evict burstable pods
-      evictPods(ctx, burstablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
-      // evict guaranteed pods
-      evictPods(ctx, guaranteedPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
-    }
-    klog.V(1).Infof("%v pods evicted from node %#v with usage %v", podEvictor.NodeEvicted(node.node), node.node.Name, node.usage)
+    klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)
+
+    nonRemovablePods, removablePods := classifyPods(node.allPods, podFilter)
+    klog.V(2).InfoS("Pods on node", "node", klog.KObj(node.node), "allPods", len(node.allPods), "nonRemovablePods", len(nonRemovablePods), "removablePods", len(removablePods))
+
+    if len(removablePods) == 0 {
+      klog.V(1).InfoS("No removable pods on node, try next node", "node", klog.KObj(node.node))
+      continue
+    }
+
+    klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
+    // sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
+    podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
+    evictPods(ctx, removablePods, node, totalAvailableUsage, taintsOfLowNodes, podEvictor)
+    klog.V(1).InfoS("Evicted pods from node", "node", klog.KObj(node.node), "evictedPods", podEvictor.NodeEvicted(node.node), "usage", node.usage)
   }
 }
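To make the upper bound concrete (illustrative numbers only, echoing the 4000m test nodes and a 50 percent target threshold used in the tests below): a low node whose CPU high threshold works out to 2000m and which currently requests 400m contributes 1600m of CPU head-room to totalAvailableUsage:

    available := resource.NewMilliQuantity(0, resource.DecimalSI)
    available.Add(*resource.NewMilliQuantity(2000, resource.DecimalSI)) // the node's highResourceThreshold for CPU
    available.Sub(*resource.NewMilliQuantity(400, resource.DecimalSI))  // the node's current CPU usage
    // available.MilliValue() == 1600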
 func evictPods(
   ctx context.Context,
   inputPods []*v1.Pod,
-  targetThresholds api.ResourceThresholds,
-  nodeCapacity v1.ResourceList,
-  nodeUsage api.ResourceThresholds,
-  totalPods *float64,
-  totalCPU *float64,
-  totalMem *float64,
+  nodeUsage NodeUsage,
+  totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
   taintsOfLowNodes map[string][]v1.Taint,
   podEvictor *evictions.PodEvictor,
-  node *v1.Node) {
-  if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCPU > 0 || *totalMem > 0) {
-    onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
+) {
+  // stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
+  continueCond := func() bool {
+    if !isNodeAboveTargetUtilization(nodeUsage) {
+      return false
+    }
+    if totalAvailableUsage[v1.ResourcePods].CmpInt64(0) < 1 {
+      return false
+    }
+    if totalAvailableUsage[v1.ResourceCPU].CmpInt64(0) < 1 {
+      return false
+    }
+    if totalAvailableUsage[v1.ResourceMemory].CmpInt64(0) < 1 {
+      return false
+    }
+    return true
+  }
+
+  if continueCond() {
     for _, pod := range inputPods {
       if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
-        klog.V(3).Infof("Skipping eviction for Pod: %#v, doesn't tolerate node taint", pod.Name)
+        klog.V(3).InfoS("Skipping eviction for pod, doesn't tolerate node taint", "pod", klog.KObj(pod))
         continue
       }
 
-      cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
-      mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)
-
-      success, err := podEvictor.EvictPod(ctx, pod, node)
+      success, err := podEvictor.EvictPod(ctx, pod, nodeUsage.node, "LowNodeUtilization")
       if err != nil {
-        klog.Errorf("Error evicting pod: (%#v)", err)
+        klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
         break
       }
 
       if success {
-        klog.V(3).Infof("Evicted pod: %#v", pod.Name)
-        // update remaining pods
-        nodeUsage[v1.ResourcePods] -= onePodPercentage
-        *totalPods--
-
-        // update remaining cpu
-        *totalCPU -= float64(cUsage)
-        nodeUsage[v1.ResourceCPU] -= api.Percentage((float64(cUsage) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
-
-        // update remaining memory
-        *totalMem -= float64(mUsage)
-        nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)
-
-        klog.V(3).Infof("updated node usage: %#v", nodeUsage)
-        // check if node utilization drops below target threshold or required capacity (cpu, memory, pods) is moved
-        if !IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) || (*totalPods <= 0 && *totalCPU <= 0 && *totalMem <= 0) {
+        klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod), "err", err)
+
+        cpuQuantity := utils.GetResourceRequestQuantity(pod, v1.ResourceCPU)
+        nodeUsage.usage[v1.ResourceCPU].Sub(cpuQuantity)
+        totalAvailableUsage[v1.ResourceCPU].Sub(cpuQuantity)
+
+        memoryQuantity := utils.GetResourceRequestQuantity(pod, v1.ResourceMemory)
+        nodeUsage.usage[v1.ResourceMemory].Sub(memoryQuantity)
+        totalAvailableUsage[v1.ResourceMemory].Sub(memoryQuantity)
+
+        nodeUsage.usage[v1.ResourcePods].Sub(*resource.NewQuantity(1, resource.DecimalSI))
+        totalAvailableUsage[v1.ResourcePods].Sub(*resource.NewQuantity(1, resource.DecimalSI))
+
+        klog.V(3).InfoS("Updated node usage", "updatedUsage", nodeUsage)
+        // check if node utilization drops below target threshold or any required capacity (cpu, memory, pods) is moved
+        if !continueCond() {
           break
         }
       }
     }
@@ -290,90 +396,46 @@ func evictPods(
   }
 }
-func SortNodesByUsage(nodes []NodeUsageMap) {
+// sortNodesByUsage sorts nodes based on usage in descending order
+func sortNodesByUsage(nodes []NodeUsage) {
   sort.Slice(nodes, func(i, j int) bool {
-    var ti, tj api.Percentage
-    for name, value := range nodes[i].usage {
-      if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
-        ti += value
-      }
-    }
-    for name, value := range nodes[j].usage {
-      if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
-        tj += value
-      }
-    }
+    ti := nodes[i].usage[v1.ResourceMemory].Value() + nodes[i].usage[v1.ResourceCPU].MilliValue() + nodes[i].usage[v1.ResourcePods].Value()
+    tj := nodes[j].usage[v1.ResourceMemory].Value() + nodes[j].usage[v1.ResourceCPU].MilliValue() + nodes[j].usage[v1.ResourcePods].Value()
     // To return sorted in descending order
     return ti > tj
   })
 }
-// sortPodsBasedOnPriority sorts pods based on priority and if their priorities are equal, they are sorted based on QoS tiers.
-func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
-  sort.Slice(evictablePods, func(i, j int) bool {
-    if evictablePods[i].Spec.Priority == nil && evictablePods[j].Spec.Priority != nil {
-      return true
-    }
-    if evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority != nil {
-      return false
-    }
-    if (evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority == nil) || (*evictablePods[i].Spec.Priority == *evictablePods[j].Spec.Priority) {
-      if podutil.IsBestEffortPod(evictablePods[i]) {
-        return true
-      }
-      if podutil.IsBurstablePod(evictablePods[i]) && podutil.IsGuaranteedPod(evictablePods[j]) {
-        return true
-      }
-      return false
-    }
-    return *evictablePods[i].Spec.Priority < *evictablePods[j].Spec.Priority
-  })
-}
-
-// createNodePodsMap returns nodepodsmap with evictable pods on node.
-func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {
-  npm := NodePodsMap{}
-  for _, node := range nodes {
-    pods, err := podutil.ListPodsOnANode(ctx, client, node)
-    if err != nil {
-      klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
-    } else {
-      npm[node] = pods
-    }
-  }
-  return npm
-}
-
-func IsNodeAboveTargetUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
-  for name, nodeValue := range nodeThresholds {
-    if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
-      if value, ok := thresholds[name]; !ok {
-        continue
-      } else if nodeValue > value {
-        return true
-      }
-    }
-  }
+// isNodeAboveTargetUtilization checks if a node is overutilized
+// At least one resource has to be above the high threshold
+func isNodeAboveTargetUtilization(usage NodeUsage) bool {
+  for name, nodeValue := range usage.usage {
+    // usage.highResourceThreshold[name] < nodeValue
+    if usage.highResourceThreshold[name].Cmp(*nodeValue) == -1 {
+      return true
+    }
+  }
   return false
 }
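For readers less familiar with resource.Quantity: Cmp returns -1 when the receiver is smaller than its argument, so the check above reads as "high threshold < usage". A standalone sketch with arbitrary values:

    threshold := resource.NewMilliQuantity(2000, resource.DecimalSI) // high threshold, 2000m
    used := resource.NewMilliQuantity(2400, resource.DecimalSI)      // current node usage, 2400m
    overutilized := threshold.Cmp(*used) == -1                       // true: this resource is above the threshold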
-func IsNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
-  for name, nodeValue := range nodeThresholds {
-    if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
-      if value, ok := thresholds[name]; !ok {
-        continue
-      } else if nodeValue > value {
-        return false
-      }
-    }
-  }
+// isNodeWithLowUtilization checks if a node is underutilized
+// All resources have to be below the low threshold
+func isNodeWithLowUtilization(usage NodeUsage) bool {
+  for name, nodeValue := range usage.usage {
+    // usage.lowResourceThreshold[name] < nodeValue
+    if usage.lowResourceThreshold[name].Cmp(*nodeValue) == -1 {
+      return false
+    }
+  }
+
   return true
 }
 
-func nodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool) api.ResourceThresholds {
+func nodeUtilization(node *v1.Node, pods []*v1.Pod) map[v1.ResourceName]*resource.Quantity {
   totalReqs := map[v1.ResourceName]*resource.Quantity{
-    v1.ResourceCPU:    {},
-    v1.ResourceMemory: {},
+    v1.ResourceCPU:    resource.NewMilliQuantity(0, resource.DecimalSI),
+    v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
+    v1.ResourcePods:   resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
   }
   for _, pod := range pods {
     req, _ := utils.PodRequestsAndLimits(pod)
@@ -386,47 +448,19 @@ func nodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool)
     }
   }
 
-  nodeCapacity := node.Status.Capacity
-  if len(node.Status.Allocatable) > 0 {
-    nodeCapacity = node.Status.Allocatable
-  }
-
-  totalPods := len(pods)
-  return api.ResourceThresholds{
-    v1.ResourceCPU:    api.Percentage((float64(totalReqs[v1.ResourceCPU].MilliValue()) * 100) / float64(nodeCapacity.Cpu().MilliValue())),
-    v1.ResourceMemory: api.Percentage(float64(totalReqs[v1.ResourceMemory].Value()) / float64(nodeCapacity.Memory().Value()) * 100),
-    v1.ResourcePods:   api.Percentage((float64(totalPods) * 100) / float64(nodeCapacity.Pods().Value())),
-  }
+  return totalReqs
 }
 
-func classifyPods(pods []*v1.Pod, evictLocalStoragePods bool) ([]*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
-  var nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods []*v1.Pod
-
-  // From https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/
-  //
-  // For a Pod to be given a QoS class of Guaranteed:
-  // - every Container in the Pod must have a memory limit and a memory request, and they must be the same.
-  // - every Container in the Pod must have a CPU limit and a CPU request, and they must be the same.
-  // A Pod is given a QoS class of Burstable if:
-  // - the Pod does not meet the criteria for QoS class Guaranteed.
-  // - at least one Container in the Pod has a memory or CPU request.
-  // For a Pod to be given a QoS class of BestEffort, the Containers in the Pod must not have any memory or CPU limits or requests.
-
+func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*v1.Pod) {
+  var nonRemovablePods, removablePods []*v1.Pod
+
   for _, pod := range pods {
-    if !podutil.IsEvictable(pod, evictLocalStoragePods) {
+    if !filter(pod) {
       nonRemovablePods = append(nonRemovablePods, pod)
-      continue
-    }
-
-    switch utils.GetPodQOS(pod) {
-    case v1.PodQOSGuaranteed:
-      guaranteedPods = append(guaranteedPods, pod)
-    case v1.PodQOSBurstable:
-      burstablePods = append(burstablePods, pod)
-    default: // alias v1.PodQOSBestEffort
-      bestEffortPods = append(bestEffortPods, pod)
-    }
+    } else {
+      removablePods = append(removablePods, pod)
+    }
   }
 
-  return nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods
+  return nonRemovablePods, removablePods
 }
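The pod filter is now injected by the caller of classifyPods. A sketch of one possible filter, assuming the evictability check that the removed version performed inline (podutil.IsEvictable with an evictLocalStoragePods flag, as in the old code) is still the intent; the real caller may build this differently:

    evictLocalStoragePods := false // assumed setting, for illustration only
    podFilter := func(pod *v1.Pod) bool {
      return podutil.IsEvictable(pod, evictLocalStoragePods)
    }
    nonRemovablePods, removablePods := classifyPods(pods, podFilter)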
@@ -22,16 +22,10 @@ import (
   "strings"
   "testing"
 
-  "reflect"
-
   v1 "k8s.io/api/core/v1"
   "k8s.io/api/policy/v1beta1"
   "k8s.io/apimachinery/pkg/api/resource"
-  "k8s.io/apimachinery/pkg/fields"
   "k8s.io/apimachinery/pkg/runtime"
-  "k8s.io/apimachinery/pkg/runtime/schema"
-  serializer "k8s.io/apimachinery/pkg/runtime/serializer"
-  "k8s.io/apimachinery/pkg/watch"
   "k8s.io/client-go/kubernetes/fake"
   core "k8s.io/client-go/testing"
   "sigs.k8s.io/descheduler/pkg/api"
@@ -45,30 +39,6 @@ var (
   highPriority = int32(10000)
 )
 
-func setRSOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList() }
-func setDSOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList() }
-func setNormalOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() }
-func setHighPriority(pod *v1.Pod) { pod.Spec.Priority = &highPriority }
-func setLowPriority(pod *v1.Pod) { pod.Spec.Priority = &lowPriority }
-func setNodeUnschedulable(node *v1.Node) { node.Spec.Unschedulable = true }
-
-func makeBestEffortPod(pod *v1.Pod) {
-  pod.Spec.Containers[0].Resources.Requests = nil
-  pod.Spec.Containers[0].Resources.Requests = nil
-  pod.Spec.Containers[0].Resources.Limits = nil
-  pod.Spec.Containers[0].Resources.Limits = nil
-}
-
-func makeBurstablePod(pod *v1.Pod) {
-  pod.Spec.Containers[0].Resources.Limits = nil
-  pod.Spec.Containers[0].Resources.Limits = nil
-}
-
-func makeGuaranteedPod(pod *v1.Pod) {
-  pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
-  pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]
-}
-
 func TestLowNodeUtilization(t *testing.T) {
   ctx := context.Background()
   n1NodeName := "n1"
@@ -80,9 +50,67 @@ func TestLowNodeUtilization(t *testing.T) {
     thresholds, targetThresholds api.ResourceThresholds
     nodes                        map[string]*v1.Node
     pods                         map[string]*v1.PodList
+    maxPodsToEvictPerNode        int
     expectedPodsEvicted          int
     evictedPods                  []string
   }{
+    {
+      name: "no evictable pods",
+      thresholds: api.ResourceThresholds{
+        v1.ResourceCPU:  30,
+        v1.ResourcePods: 30,
+      },
+      targetThresholds: api.ResourceThresholds{
+        v1.ResourceCPU:  50,
+        v1.ResourcePods: 50,
+      },
+      nodes: map[string]*v1.Node{
+        n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
+        n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
+        n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
+      },
+      pods: map[string]*v1.PodList{
+        n1NodeName: {
+          Items: []v1.Pod{
+            // These won't be evicted.
+            *test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetDSOwnerRef),
+            *test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetDSOwnerRef),
+            *test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetDSOwnerRef),
+            *test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
+            *test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
+              // A pod with local storage.
+              test.SetNormalOwnerRef(pod)
+              pod.Spec.Volumes = []v1.Volume{
+                {
+                  Name: "sample",
+                  VolumeSource: v1.VolumeSource{
+                    HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
+                    EmptyDir: &v1.EmptyDirVolumeSource{
+                      SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+                  },
+                },
+              }
+              // A Mirror Pod.
+              pod.Annotations = test.GetMirrorPodAnnotation()
+            }),
+            *test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
+              // A Critical Pod.
+              pod.Namespace = "kube-system"
+              priority := utils.SystemCriticalPriority
+              pod.Spec.Priority = &priority
+            }),
+          },
+        },
+        n2NodeName: {
+          Items: []v1.Pod{
+            *test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
+          },
+        },
+        n3NodeName: {},
+      },
+      maxPodsToEvictPerNode: 0,
+      expectedPodsEvicted:   0,
+    },
    {
      name: "without priorities",
      thresholds: api.ResourceThresholds{
@@ -96,21 +124,21 @@ func TestLowNodeUtilization(t *testing.T) {
      nodes: map[string]*v1.Node{
        n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
        n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-        n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+        n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
      },
      pods: map[string]*v1.PodList{
        n1NodeName: {
          Items: []v1.Pod{
-            *test.BuildTestPod("p1", 400, 0, n1NodeName, setRSOwnerRef),
-            *test.BuildTestPod("p2", 400, 0, n1NodeName, setRSOwnerRef),
-            *test.BuildTestPod("p3", 400, 0, n1NodeName, setRSOwnerRef),
-            *test.BuildTestPod("p4", 400, 0, n1NodeName, setRSOwnerRef),
-            *test.BuildTestPod("p5", 400, 0, n1NodeName, setRSOwnerRef),
+            *test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
+            *test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
+            *test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
+            *test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
+            *test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
            // These won't be evicted.
-            *test.BuildTestPod("p6", 400, 0, n1NodeName, setDSOwnerRef),
+            *test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
            *test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
              // A pod with local storage.
-              setNormalOwnerRef(pod)
+              test.SetNormalOwnerRef(pod)
              pod.Spec.Volumes = []v1.Volume{
                {
                  Name: "sample",
@@ -134,11 +162,72 @@ func TestLowNodeUtilization(t *testing.T) {
        },
        n2NodeName: {
          Items: []v1.Pod{
-            *test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+            *test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
          },
        },
        n3NodeName: {},
      },
+      maxPodsToEvictPerNode: 0,
+      expectedPodsEvicted:   4,
+    },
+    {
+      name: "without priorities stop when cpu capacity is depleted",
+      thresholds: api.ResourceThresholds{
+        v1.ResourceCPU:  30,
+        v1.ResourcePods: 30,
+      },
+      targetThresholds: api.ResourceThresholds{
+        v1.ResourceCPU:  50,
+        v1.ResourcePods: 50,
+      },
+      nodes: map[string]*v1.Node{
+        n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
+        n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
+        n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
+      },
+      pods: map[string]*v1.PodList{
+        n1NodeName: {
+          Items: []v1.Pod{
+            *test.BuildTestPod("p1", 400, 300, n1NodeName, test.SetRSOwnerRef),
+            *test.BuildTestPod("p2", 400, 300, n1NodeName, test.SetRSOwnerRef),
+            *test.BuildTestPod("p3", 400, 300, n1NodeName, test.SetRSOwnerRef),
+            *test.BuildTestPod("p4", 400, 300, n1NodeName, test.SetRSOwnerRef),
+            *test.BuildTestPod("p5", 400, 300, n1NodeName, test.SetRSOwnerRef),
+            // These won't be evicted.
+            *test.BuildTestPod("p6", 400, 300, n1NodeName, test.SetDSOwnerRef),
+            *test.BuildTestPod("p7", 400, 300, n1NodeName, func(pod *v1.Pod) {
+              // A pod with local storage.
+              test.SetNormalOwnerRef(pod)
+              pod.Spec.Volumes = []v1.Volume{
+                {
+                  Name: "sample",
+                  VolumeSource: v1.VolumeSource{
+                    HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
+                    EmptyDir: &v1.EmptyDirVolumeSource{
+                      SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+                  },
+                },
+              }
+              // A Mirror Pod.
+              pod.Annotations = test.GetMirrorPodAnnotation()
+            }),
+            *test.BuildTestPod("p8", 400, 300, n1NodeName, func(pod *v1.Pod) {
+              // A Critical Pod.
+              pod.Namespace = "kube-system"
+              priority := utils.SystemCriticalPriority
+              pod.Spec.Priority = &priority
+            }),
+          },
+        },
+        n2NodeName: {
+          Items: []v1.Pod{
+            *test.BuildTestPod("p9", 400, 2100, n2NodeName, test.SetRSOwnerRef),
+          },
+        },
+        n3NodeName: {},
+      },
+      maxPodsToEvictPerNode: 0,
+      // 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before cpu is depleted
      expectedPodsEvicted: 3,
    },
    {
@@ -154,40 +243,40 @@ func TestLowNodeUtilization(t *testing.T) {
      nodes: map[string]*v1.Node{
        n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
        n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-        n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+        n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
      },
      pods: map[string]*v1.PodList{
        n1NodeName: {
          Items: []v1.Pod{
            *test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setRSOwnerRef(pod)
-              setHighPriority(pod)
+              test.SetRSOwnerRef(pod)
+              test.SetPodPriority(pod, highPriority)
            }),
            *test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setRSOwnerRef(pod)
-              setHighPriority(pod)
+              test.SetRSOwnerRef(pod)
+              test.SetPodPriority(pod, highPriority)
            }),
            *test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setRSOwnerRef(pod)
-              setHighPriority(pod)
+              test.SetRSOwnerRef(pod)
+              test.SetPodPriority(pod, highPriority)
            }),
            *test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setRSOwnerRef(pod)
-              setHighPriority(pod)
+              test.SetRSOwnerRef(pod)
+              test.SetPodPriority(pod, highPriority)
            }),
            *test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setRSOwnerRef(pod)
-              setLowPriority(pod)
+              test.SetRSOwnerRef(pod)
+              test.SetPodPriority(pod, lowPriority)
            }),
            // These won't be evicted.
            *test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setDSOwnerRef(pod)
-              setHighPriority(pod)
+              test.SetDSOwnerRef(pod)
+              test.SetPodPriority(pod, highPriority)
            }),
            *test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
              // A pod with local storage.
-              setNormalOwnerRef(pod)
-              setLowPriority(pod)
+              test.SetNormalOwnerRef(pod)
+              test.SetPodPriority(pod, lowPriority)
              pod.Spec.Volumes = []v1.Volume{
                {
                  Name: "sample",
@@ -211,12 +300,13 @@ func TestLowNodeUtilization(t *testing.T) {
        },
        n2NodeName: {
          Items: []v1.Pod{
-            *test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+            *test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
          },
        },
        n3NodeName: {},
      },
-      expectedPodsEvicted: 3,
+      maxPodsToEvictPerNode: 0,
+      expectedPodsEvicted:   4,
    },
    {
      name: "without priorities evicting best-effort pods only",
@@ -231,38 +321,38 @@ func TestLowNodeUtilization(t *testing.T) {
      nodes: map[string]*v1.Node{
        n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
        n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
-        n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
+        n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
      },
      // All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
      pods: map[string]*v1.PodList{
        n1NodeName: {
          Items: []v1.Pod{
            *test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setRSOwnerRef(pod)
-              makeBestEffortPod(pod)
+              test.SetRSOwnerRef(pod)
+              test.MakeBestEffortPod(pod)
            }),
            *test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setRSOwnerRef(pod)
-              makeBestEffortPod(pod)
+              test.SetRSOwnerRef(pod)
+              test.MakeBestEffortPod(pod)
            }),
            *test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setRSOwnerRef(pod)
+              test.SetRSOwnerRef(pod)
            }),
            *test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setRSOwnerRef(pod)
-              makeBestEffortPod(pod)
+              test.SetRSOwnerRef(pod)
+              test.MakeBestEffortPod(pod)
            }),
            *test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setRSOwnerRef(pod)
-              makeBestEffortPod(pod)
+              test.SetRSOwnerRef(pod)
+              test.MakeBestEffortPod(pod)
            }),
            // These won't be evicted.
            *test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
-              setDSOwnerRef(pod)
+              test.SetDSOwnerRef(pod)
            }),
            *test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
              // A pod with local storage.
-              setNormalOwnerRef(pod)
+              test.SetNormalOwnerRef(pod)
              pod.Spec.Volumes = []v1.Volume{
                {
                  Name: "sample",
@@ -286,13 +376,14 @@ func TestLowNodeUtilization(t *testing.T) {
        },
        n2NodeName: {
          Items: []v1.Pod{
-            *test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
+            *test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
          },
        },
        n3NodeName: {},
      },
-      expectedPodsEvicted: 4,
-      evictedPods:         []string{"p1", "p2", "p4", "p5"},
+      maxPodsToEvictPerNode: 0,
+      expectedPodsEvicted:   4,
+      evictedPods:           []string{"p1", "p2", "p4", "p5"},
    },
  }
 
@@ -346,20 +437,26 @@ func TestLowNodeUtilization(t *testing.T) {
      nodes = append(nodes, node)
    }
 
-    npm := createNodePodsMap(ctx, fakeClient, nodes)
-    lowNodes, targetNodes := classifyNodes(npm, test.thresholds, test.targetThresholds, false)
-    if len(lowNodes) != 1 {
-      t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
-    }
    podEvictor := evictions.NewPodEvictor(
      fakeClient,
      "v1",
      false,
-      test.expectedPodsEvicted,
+      test.maxPodsToEvictPerNode,
      nodes,
+      false,
    )
 
-    evictPodsFromTargetNodes(ctx, targetNodes, lowNodes, test.targetThresholds, false, podEvictor)
+    strategy := api.DeschedulerStrategy{
+      Enabled: true,
+      Params: &api.StrategyParameters{
+        NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
+          Thresholds:       test.thresholds,
+          TargetThresholds: test.targetThresholds,
+        },
+      },
+    }
+    LowNodeUtilization(ctx, fakeClient, strategy, nodes, podEvictor)
 
    podsEvicted := podEvictor.TotalEvicted()
    if test.expectedPodsEvicted != podsEvicted {
      t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
@@ -371,41 +468,102 @@ func TestLowNodeUtilization(t *testing.T) {
    }
  }
 }
 
-func TestSortPodsByPriority(t *testing.T) {
-  n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
-
-  p1 := test.BuildTestPod("p1", 400, 0, n1.Name, setLowPriority)
-
-  // BestEffort
-  p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
-    setHighPriority(pod)
-    makeBestEffortPod(pod)
-  })
-
-  // Burstable
-  p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
-    setHighPriority(pod)
-    makeBurstablePod(pod)
-  })
-
-  // Guaranteed
-  p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
-    setHighPriority(pod)
-    makeGuaranteedPod(pod)
-  })
-
-  // Best effort with nil priorities.
-  p5 := test.BuildTestPod("p5", 400, 100, n1.Name, makeBestEffortPod)
-  p5.Spec.Priority = nil
-
-  p6 := test.BuildTestPod("p6", 400, 100, n1.Name, makeGuaranteedPod)
-  p6.Spec.Priority = nil
-
-  podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
-
-  sortPodsBasedOnPriority(podList)
-  if !reflect.DeepEqual(podList[len(podList)-1], p4) {
-    t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
-  }
-}
+func TestValidateStrategyConfig(t *testing.T) {
+  tests := []struct {
+    name             string
+    thresholds       api.ResourceThresholds
+    targetThresholds api.ResourceThresholds
+    errInfo          error
+  }{
+    {
+      name: "passing invalid thresholds",
+      thresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    20,
+        v1.ResourceMemory: 120,
+      },
+      targetThresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    80,
+        v1.ResourceMemory: 80,
+      },
+      errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
+        "%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
+    },
+    {
+      name: "passing invalid targetThresholds",
+      thresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    20,
+        v1.ResourceMemory: 20,
+      },
+      targetThresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    80,
+        "resourceInvalid": 80,
+      },
+      errInfo: fmt.Errorf("targetThresholds config is not valid: %v",
+        fmt.Errorf("only cpu, memory, or pods thresholds can be specified")),
+    },
+    {
+      name: "thresholds and targetThresholds configured different num of resources",
+      thresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    20,
+        v1.ResourceMemory: 20,
+      },
+      targetThresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    80,
+        v1.ResourceMemory: 80,
+        v1.ResourcePods:   80,
+      },
+      errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
+    },
+    {
+      name: "thresholds and targetThresholds configured different resources",
+      thresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    20,
+        v1.ResourceMemory: 20,
+      },
+      targetThresholds: api.ResourceThresholds{
+        v1.ResourceCPU:  80,
+        v1.ResourcePods: 80,
+      },
+      errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
+    },
+    {
+      name: "thresholds' CPU config value is greater than targetThresholds'",
+      thresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    90,
+        v1.ResourceMemory: 20,
+      },
+      targetThresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    80,
+        v1.ResourceMemory: 80,
+      },
+      errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
+    },
+    {
+      name: "passing valid strategy config",
+      thresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    20,
+        v1.ResourceMemory: 20,
+      },
+      targetThresholds: api.ResourceThresholds{
+        v1.ResourceCPU:    80,
+        v1.ResourceMemory: 80,
+      },
+      errInfo: nil,
+    },
+  }
+
+  for _, testCase := range tests {
+    validateErr := validateStrategyConfig(testCase.thresholds, testCase.targetThresholds)
+
+    if validateErr == nil || testCase.errInfo == nil {
+      if validateErr != testCase.errInfo {
+        t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
+          testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
+      }
+    } else if validateErr.Error() != testCase.errInfo.Error() {
+      t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
+        testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
+    }
  }
 }
 
@@ -413,17 +571,17 @@ func TestValidateThresholds(t *testing.T) {
  tests := []struct {
    name  string
    input api.ResourceThresholds
-    succeed bool
+    errInfo error
  }{
    {
      name:  "passing nil map for threshold",
      input: nil,
-      succeed: false,
+      errInfo: fmt.Errorf("no resource threshold is configured"),
    },
    {
      name:  "passing no threshold",
      input: api.ResourceThresholds{},
-      succeed: false,
+      errInfo: fmt.Errorf("no resource threshold is configured"),
    },
    {
      name: "passing unsupported resource name",
@@ -431,7 +589,7 @@ func TestValidateThresholds(t *testing.T) {
        v1.ResourceCPU:     40,
        v1.ResourceStorage: 25.5,
      },
-      succeed: false,
+      errInfo: fmt.Errorf("only cpu, memory, or pods thresholds can be specified"),
    },
    {
      name: "passing invalid resource name",
@@ -439,7 +597,30 @@ func TestValidateThresholds(t *testing.T) {
        v1.ResourceCPU: 40,
        "coolResource": 42.0,
      },
-      succeed: false,
+      errInfo: fmt.Errorf("only cpu, memory, or pods thresholds can be specified"),
+    },
+    {
+      name: "passing invalid resource value",
+      input: api.ResourceThresholds{
+        v1.ResourceCPU:    110,
+        v1.ResourceMemory: 80,
+      },
+      errInfo: fmt.Errorf("%v threshold not in [%v, %v] range", v1.ResourceCPU, MinResourcePercentage, MaxResourcePercentage),
+    },
+    {
+      name: "passing a valid threshold with max and min resource value",
+      input: api.ResourceThresholds{
+        v1.ResourceCPU:    100,
+        v1.ResourceMemory: 0,
+      },
+      errInfo: nil,
+    },
+    {
+      name: "passing a valid threshold with only cpu",
+      input: api.ResourceThresholds{
+        v1.ResourceCPU: 80,
+      },
+      errInfo: nil,
    },
    {
      name: "passing a valid threshold with cpu, memory and pods",
@@ -448,68 +629,28 @@ func TestValidateThresholds(t *testing.T) {
        v1.ResourceMemory: 30,
        v1.ResourcePods:   40,
      },
-      succeed: true,
+      errInfo: nil,
    },
  }
 
  for _, test := range tests {
-    isValid := validateThresholds(test.input)
-
-    if isValid != test.succeed {
-      t.Errorf("expected validity of threshold: %#v\nto be %v but got %v instead", test.input, test.succeed, isValid)
+    validateErr := validateThresholds(test.input)
+
+    if validateErr == nil || test.errInfo == nil {
+      if validateErr != test.errInfo {
+        t.Errorf("expected validity of threshold: %#v to be %v but got %v instead", test.input, test.errInfo, validateErr)
+      }
+    } else if validateErr.Error() != test.errInfo.Error() {
+      t.Errorf("expected validity of threshold: %#v to be %v but got %v instead", test.input, test.errInfo, validateErr)
    }
  }
 }
-
-func newFake(objects ...runtime.Object) *core.Fake {
-  scheme := runtime.NewScheme()
-  codecs := serializer.NewCodecFactory(scheme)
-  fake.AddToScheme(scheme)
-  o := core.NewObjectTracker(scheme, codecs.UniversalDecoder())
-  for _, obj := range objects {
-    if err := o.Add(obj); err != nil {
-      panic(err)
-    }
-  }
-
-  fakePtr := core.Fake{}
-  fakePtr.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-    objs, err := o.List(
-      schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
-      schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"},
-      action.GetNamespace(),
-    )
-    if err != nil {
-      return true, nil, err
-    }
-
-    obj := &v1.PodList{
-      Items: []v1.Pod{},
-    }
-    for _, pod := range objs.(*v1.PodList).Items {
-      podFieldSet := fields.Set(map[string]string{
-        "spec.nodeName": pod.Spec.NodeName,
-        "status.phase":  string(pod.Status.Phase),
-      })
-      match := action.(core.ListAction).GetListRestrictions().Fields.Matches(podFieldSet)
-      if !match {
-        continue
-      }
-      obj.Items = append(obj.Items, *pod.DeepCopy())
-    }
-    return true, obj, nil
-  })
-  fakePtr.AddReactor("*", "*", core.ObjectReaction(o))
-  fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil))
-
-  return &fakePtr
-}
 
 func TestWithTaints(t *testing.T) {
  ctx := context.Background()
  strategy := api.DeschedulerStrategy{
    Enabled: true,
-    Params: api.StrategyParameters{
+    Params: &api.StrategyParameters{
      NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
        Thresholds: api.ResourceThresholds{
          v1.ResourcePods: 20,
@@ -533,7 +674,7 @@ func TestWithTaints(t *testing.T) {
    },
  }
 
-  podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, setRSOwnerRef)
+  podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, test.SetRSOwnerRef)
  podThatToleratesTaint.Spec.Tolerations = []v1.Toleration{
    {
      Key: "key",
@@ -552,16 +693,16 @@ func TestWithTaints(t *testing.T) {
      nodes: []*v1.Node{n1, n2, n3},
      pods: []*v1.Pod{
        //Node 1 pods
-        test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
        // Node 2 pods
-        test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, setRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, test.SetRSOwnerRef),
      },
      evictionsExpected: 1,
    },
@@ -570,16 +711,16 @@ func TestWithTaints(t *testing.T) {
      nodes: []*v1.Node{n1, n3withTaints},
      pods: []*v1.Pod{
        //Node 1 pods
-        test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
        // Node 3 pods
-        test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
      },
      evictionsExpected: 0,
    },
@@ -588,16 +729,16 @@ func TestWithTaints(t *testing.T) {
      nodes: []*v1.Node{n1, n3withTaints},
      pods: []*v1.Pod{
        //Node 1 pods
-        test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
-        test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
        podThatToleratesTaint,
        // Node 3 pods
-        test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
+        test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
|
||||||
},
|
},
|
||||||
evictionsExpected: 1,
|
evictionsExpected: 1,
|
||||||
},
|
},
|
||||||
@@ -614,28 +755,21 @@ func TestWithTaints(t *testing.T) {
|
|||||||
objs = append(objs, pod)
|
objs = append(objs, pod)
|
||||||
}
|
}
|
||||||
|
|
||||||
fakePtr := newFake(objs...)
|
fakeClient := fake.NewSimpleClientset(objs...)
|
||||||
var evictionCounter int
|
|
||||||
fakePtr.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
|
||||||
if action.GetSubresource() != "eviction" || action.GetResource().Resource != "pods" {
|
|
||||||
return false, nil, nil
|
|
||||||
}
|
|
||||||
evictionCounter++
|
|
||||||
return true, nil, nil
|
|
||||||
})
|
|
||||||
|
|
||||||
podEvictor := evictions.NewPodEvictor(
|
podEvictor := evictions.NewPodEvictor(
|
||||||
&fake.Clientset{Fake: *fakePtr},
|
fakeClient,
|
||||||
"policy/v1",
|
"policy/v1",
|
||||||
false,
|
false,
|
||||||
item.evictionsExpected,
|
item.evictionsExpected,
|
||||||
item.nodes,
|
item.nodes,
|
||||||
|
false,
|
||||||
)
|
)
|
||||||
|
|
||||||
LowNodeUtilization(ctx, &fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor)
|
LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor)
|
||||||
|
|
||||||
if item.evictionsExpected != evictionCounter {
|
if item.evictionsExpected != podEvictor.TotalEvicted() {
|
||||||
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, evictionCounter)
|
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
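The test above drops its hand-rolled eviction reactor and instead seeds a fake clientset with the test objects, letting the pod evictor do the counting via TotalEvicted(). A minimal, self-contained sketch of that seeding pattern with client-go's fake package (not taken from this diff; the pod and node names here are illustrative only):

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Illustrative objects; any runtime.Object can be seeded.
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod_1_n1", Namespace: "default"}}
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}

	// The fake clientset serves the seeded objects through the normal typed API,
	// so code under test can list pods and nodes without custom reactors.
	client := fake.NewSimpleClientset(pod, node)

	pods, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("pods seeded into the fake client:", len(pods.Items)) // 1
}
```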
@@ -18,46 +18,91 @@ package strategies

 import (
 	"context"
+	"fmt"

 	v1 "k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"

 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )

-func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
+	if params == nil || len(params.NodeAffinityType) == 0 {
+		return fmt.Errorf("NodeAffinityType is empty")
+	}
+	// At most one of include/exclude can be set
+	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
+		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
+	}
+	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
+		return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
+	}
+
+	return nil
+}
+
+// RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
+func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+	if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
+		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
+		return
+	}
+	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
+	if err != nil {
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
+		return
+	}
+
+	var includedNamespaces, excludedNamespaces []string
+	if strategy.Params.Namespaces != nil {
+		includedNamespaces = strategy.Params.Namespaces.Include
+		excludedNamespaces = strategy.Params.Namespaces.Exclude
+	}
+
+	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

 	for _, nodeAffinity := range strategy.Params.NodeAffinityType {
-		klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)
+		klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)

 		switch nodeAffinity {
 		case "requiredDuringSchedulingIgnoredDuringExecution":
 			for _, node := range nodes {
-				klog.V(1).Infof("Processing node: %#v\n", node.Name)
+				klog.V(1).InfoS("Processing node", "node", klog.KObj(node))

-				pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
+				pods, err := podutil.ListPodsOnANode(
+					ctx,
+					client,
+					node,
+					podutil.WithFilter(func(pod *v1.Pod) bool {
+						return evictable.IsEvictable(pod) &&
+							!nodeutil.PodFitsCurrentNode(pod, node) &&
+							nodeutil.PodFitsAnyNode(pod, nodes)
+					}),
+					podutil.WithNamespaces(includedNamespaces),
+					podutil.WithoutNamespaces(excludedNamespaces),
+				)
 				if err != nil {
-					klog.Errorf("failed to get pods from %v: %v", node.Name, err)
+					klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))
 				}

 				for _, pod := range pods {
 					if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
-						if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
-							klog.V(1).Infof("Evicting pod: %v", pod.Name)
-							if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
-								klog.Errorf("Error evicting pod: (%#v)", err)
-								break
-							}
+						klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
+						if _, err := podEvictor.EvictPod(ctx, pod, node, "NodeAffinity"); err != nil {
+							klog.ErrorS(err, "Error evicting pod")
+							break
 						}
 					}
 				}
 			}
 		default:
-			klog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
+			klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
 		}
 	}
-	klog.V(1).Infof("Evicted %v pods", podEvictor.TotalEvicted())
+	klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
 }
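Each strategy in this change gains a small parameter validator with the same two rules: Include/Exclude namespaces are mutually exclusive, and only one of thresholdPriority/thresholdPriorityClassName may be set. A compile-on-its-own sketch of those checks, written against local stand-in types rather than the descheduler's api package:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for illustration only; the real types live in sigs.k8s.io/descheduler/pkg/api.
type Namespaces struct {
	Include []string
	Exclude []string
}

type StrategyParameters struct {
	NodeAffinityType           []string
	Namespaces                 *Namespaces
	ThresholdPriority          *int32
	ThresholdPriorityClassName string
}

func validateParams(params *StrategyParameters) error {
	if params == nil || len(params.NodeAffinityType) == 0 {
		return errors.New("NodeAffinityType is empty")
	}
	// At most one of Include/Exclude may be used.
	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return errors.New("only one of Include/Exclude namespaces can be set")
	}
	// Likewise, only one way of expressing the priority threshold is allowed.
	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
		return errors.New("only one of thresholdPriority and thresholdPriorityClassName can be set")
	}
	return nil
}

func main() {
	bad := &StrategyParameters{
		NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
		Namespaces:       &Namespaces{Include: []string{"a"}, Exclude: []string{"b"}},
	}
	fmt.Println(validateParams(bad)) // only one of Include/Exclude namespaces can be set
}
```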
@@ -33,7 +33,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 	ctx := context.Background()
 	requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
 		Enabled: true,
-		Params: api.StrategyParameters{
+		Params: &api.StrategyParameters{
 			NodeAffinityType: []string{
 				"requiredDuringSchedulingIgnoredDuringExecution",
 			},
@@ -93,13 +93,13 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 		pods []v1.Pod
 		strategy api.DeschedulerStrategy
 		expectedEvictedPodCount int
-		maxPodsToEvict int
+		maxPodsToEvictPerNode int
 	}{
 		{
 			description: "Invalid strategy type, should not evict any pods",
 			strategy: api.DeschedulerStrategy{
 				Enabled: true,
-				Params: api.StrategyParameters{
+				Params: &api.StrategyParameters{
 					NodeAffinityType: []string{
 						"requiredDuringSchedulingRequiredDuringExecution",
 					},
@@ -108,7 +108,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			expectedEvictedPodCount: 0,
 			pods: addPodsToNode(nodeWithoutLabels),
 			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Pod is correctly scheduled on node, no eviction expected",
@@ -116,7 +116,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			expectedEvictedPodCount: 0,
 			pods: addPodsToNode(nodeWithLabels),
 			nodes: []*v1.Node{nodeWithLabels},
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
@@ -124,15 +124,15 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods: addPodsToNode(nodeWithoutLabels),
 			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
-			description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 1, should not be evicted",
+			description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
 			expectedEvictedPodCount: 1,
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods: addPodsToNode(nodeWithoutLabels),
 			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-			maxPodsToEvict: 1,
+			maxPodsToEvictPerNode: 1,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
@@ -140,7 +140,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods: addPodsToNode(nodeWithoutLabels),
 			nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 	}

@@ -155,11 +155,12 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			tc.maxPodsToEvict,
+			tc.maxPodsToEvictPerNode,
 			tc.nodes,
+			false,
 		)

-		RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, false, podEvictor)
+		RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
 		actualEvictedPodCount := podEvictor.TotalEvicted()
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
 			t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -18,6 +18,7 @@ package strategies

 import (
 	"context"
+	"fmt"

 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -26,14 +27,56 @@ import (

 	v1 "k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )

+func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters) error {
+	if params == nil {
+		return nil
+	}
+
+	// At most one of include/exclude can be set
+	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
+		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
+	}
+	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
+		return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
+	}
+
+	return nil
+}
+
 // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
-func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+	if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
+		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
+		return
+	}
+
+	var includedNamespaces, excludedNamespaces []string
+	if strategy.Params != nil && strategy.Params.Namespaces != nil {
+		includedNamespaces = strategy.Params.Namespaces.Include
+		excludedNamespaces = strategy.Params.Namespaces.Exclude
+	}
+
+	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
+	if err != nil {
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
+		return
+	}
+
+	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
+
 	for _, node := range nodes {
-		klog.V(1).Infof("Processing node: %#v\n", node.Name)
-		pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
+		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
+		pods, err := podutil.ListPodsOnANode(
+			ctx,
+			client,
+			node,
+			podutil.WithFilter(evictable.IsEvictable),
+			podutil.WithNamespaces(includedNamespaces),
+			podutil.WithoutNamespaces(excludedNamespaces),
+		)
 		if err != nil {
 			//no pods evicted as error encountered retrieving evictable Pods
 			return
@@ -45,9 +88,9 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 				node.Spec.Taints,
 				func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
 			) {
-				klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name)
-				if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
-					klog.Errorf("Error evicting pod: (%#v)", err)
+				klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
+				if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
+					klog.ErrorS(err, "Error evicting pod")
 					break
 				}
 			}
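The listing call changes from ListEvictablePodsOnNode to ListPodsOnANode configured through functional options (WithFilter, WithNamespaces, WithoutNamespaces). A self-contained sketch of that functional-options pattern, using local stand-in types rather than the descheduler's podutil package:

```go
package main

import "fmt"

type Pod struct {
	Name      string
	Namespace string
}

type options struct {
	filter            func(Pod) bool
	includeNamespaces map[string]bool
	excludeNamespaces map[string]bool
}

// Option mutates the listing options; each With* helper returns one.
type Option func(*options)

func WithFilter(f func(Pod) bool) Option { return func(o *options) { o.filter = f } }

func WithNamespaces(ns []string) Option {
	return func(o *options) {
		o.includeNamespaces = map[string]bool{}
		for _, n := range ns {
			o.includeNamespaces[n] = true
		}
	}
}

func WithoutNamespaces(ns []string) Option {
	return func(o *options) {
		o.excludeNamespaces = map[string]bool{}
		for _, n := range ns {
			o.excludeNamespaces[n] = true
		}
	}
}

func listPods(all []Pod, opts ...Option) []Pod {
	o := &options{}
	for _, opt := range opts {
		opt(o)
	}
	var out []Pod
	for _, p := range all {
		if len(o.includeNamespaces) > 0 && !o.includeNamespaces[p.Namespace] {
			continue // not in the include list
		}
		if o.excludeNamespaces[p.Namespace] {
			continue // explicitly excluded
		}
		if o.filter != nil && !o.filter(p) {
			continue // rejected by the evictability filter
		}
		out = append(out, p)
	}
	return out
}

func main() {
	pods := []Pod{{"a", "kube-system"}, {"b", "dev"}, {"c", "dev"}}
	got := listPods(pods,
		WithNamespaces([]string{"dev"}),
		WithFilter(func(p Pod) bool { return p.Name != "c" }),
	)
	fmt.Println(got) // [{b dev}]
}
```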
@@ -102,7 +102,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 		nodes []*v1.Node
 		pods []v1.Pod
 		evictLocalStoragePods bool
-		maxPodsToEvict int
+		maxPodsToEvictPerNode int
 		expectedEvictedPodCount int
 	}{

@@ -111,7 +111,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			pods: []v1.Pod{*p1, *p2, *p3},
 			nodes: []*v1.Node{node1},
 			evictLocalStoragePods: false,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1, //p2 gets evicted
 		},
 		{
@@ -119,15 +119,15 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			pods: []v1.Pod{*p1, *p3, *p4},
 			nodes: []*v1.Node{node1},
 			evictLocalStoragePods: false,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1, //p4 gets evicted
 		},
 		{
-			description: "Only <maxPodsToEvict> number of Pods not tolerating node taint should be evicted",
+			description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
 			pods: []v1.Pod{*p1, *p5, *p6},
 			nodes: []*v1.Node{node1},
 			evictLocalStoragePods: false,
-			maxPodsToEvict: 1,
+			maxPodsToEvictPerNode: 1,
 			expectedEvictedPodCount: 1, //p5 or p6 gets evicted
 		},
 		{
@@ -135,7 +135,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			pods: []v1.Pod{*p7, *p8, *p9, *p10},
 			nodes: []*v1.Node{node2},
 			evictLocalStoragePods: false,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 0,
 		},
 		{
@@ -143,7 +143,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			pods: []v1.Pod{*p7, *p8, *p9, *p10},
 			nodes: []*v1.Node{node2},
 			evictLocalStoragePods: true,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1,
 		},
 		{
@@ -151,7 +151,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			pods: []v1.Pod{*p7, *p8, *p10, *p11},
 			nodes: []*v1.Node{node2},
 			evictLocalStoragePods: false,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			expectedEvictedPodCount: 1,
 		},
 	}
@@ -168,11 +168,12 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			tc.maxPodsToEvict,
+			tc.maxPodsToEvictPerNode,
 			tc.nodes,
+			tc.evictLocalStoragePods,
 		)

-		RemovePodsViolatingNodeTaints(ctx, fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor)
+		RemovePodsViolatingNodeTaints(ctx, fakeClient, api.DeschedulerStrategy{}, tc.nodes, podEvictor)
 		actualEvictedPodCount := podEvictor.TotalEvicted()
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
 			t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
@@ -18,36 +18,80 @@ package strategies

 import (
 	"context"
+	"fmt"

 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )

+func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyParameters) error {
+	if params == nil {
+		return nil
+	}
+
+	// At most one of include/exclude can be set
+	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
+		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
+	}
+	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
+		return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
+	}
+
+	return nil
+}
+
 // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
-func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+	if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
+		klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters")
+		return
+	}
+
+	var includedNamespaces, excludedNamespaces []string
+	if strategy.Params != nil && strategy.Params.Namespaces != nil {
+		includedNamespaces = strategy.Params.Namespaces.Include
+		excludedNamespaces = strategy.Params.Namespaces.Exclude
+	}
+
+	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
+	if err != nil {
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
+		return
+	}
+
+	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
+
 	for _, node := range nodes {
-		klog.V(1).Infof("Processing node: %#v\n", node.Name)
-		pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
+		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
+		pods, err := podutil.ListPodsOnANode(
+			ctx,
+			client,
+			node,
+			podutil.WithNamespaces(includedNamespaces),
+			podutil.WithoutNamespaces(excludedNamespaces),
+		)
 		if err != nil {
 			return
 		}
+		// sort the evictable Pods based on priority, if there are multiple pods with same priority, they are sorted based on QoS tiers.
+		podutil.SortPodsBasedOnPriorityLowToHigh(pods)
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
-			if checkPodsWithAntiAffinityExist(pods[i], pods) {
-				success, err := podEvictor.EvictPod(ctx, pods[i], node)
+			if checkPodsWithAntiAffinityExist(pods[i], pods) && evictable.IsEvictable(pods[i]) {
+				success, err := podEvictor.EvictPod(ctx, pods[i], node, "InterPodAntiAffinity")
 				if err != nil {
-					klog.Errorf("Error evicting pod: (%#v)", err)
+					klog.ErrorS(err, "Error evicting pod")
 					break
 				}

 				if success {
-					klog.V(1).Infof("Evicted pod: %#v\n because of existing anti-affinity", pods[i].Name)
 					// Since the current pod is evicted all other pods which have anti-affinity with this
 					// pod need not be evicted.
 					// Update pods.
@@ -68,7 +112,7 @@ func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
 			namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, &term)
 			selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
 			if err != nil {
-				klog.Infof("%v", err)
+				klog.ErrorS(err, "Unable to convert LabelSelector into Selector")
 				return false
 			}
 			for _, existingPod := range pods {
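The anti-affinity strategy now sorts candidate pods from lowest to highest priority before evicting, so the cheapest pod goes first. A tiny sketch of that ordering with plain sort.Slice on a stand-in pod type (not the descheduler's SortPodsBasedOnPriorityLowToHigh helper):

```go
package main

import (
	"fmt"
	"sort"
)

type pod struct {
	Name     string
	Priority int32
}

func main() {
	pods := []pod{{"p5", 100}, {"p6", 50}, {"p7", 0}}
	// Lower priority first, so the least important pod is considered for eviction before higher-priority ones.
	sort.Slice(pods, func(i, j int) bool { return pods[i].Priority < pods[j].Priority })
	fmt.Println(pods[0].Name) // p7 is evaluated first
}
```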
@@ -27,6 +27,7 @@ import (
 	core "k8s.io/client-go/testing"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
+	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/test"
 )

@@ -37,42 +38,76 @@ func TestPodAntiAffinity(t *testing.T) {
 	p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
 	p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
 	p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
+	p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
+	p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
+	p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
+	criticalPriority := utils.SystemCriticalPriority
+	nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node.Name, func(pod *v1.Pod) {
+		pod.Spec.Priority = &criticalPriority
+	})
 	p2.Labels = map[string]string{"foo": "bar"}
-	p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-	p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
+	p5.Labels = map[string]string{"foo": "bar"}
+	p6.Labels = map[string]string{"foo": "bar"}
+	p7.Labels = map[string]string{"foo1": "bar1"}
+	nonEvictablePod.Labels = map[string]string{"foo": "bar"}
+	test.SetNormalOwnerRef(p1)
+	test.SetNormalOwnerRef(p2)
+	test.SetNormalOwnerRef(p3)
+	test.SetNormalOwnerRef(p4)
+	test.SetNormalOwnerRef(p5)
+	test.SetNormalOwnerRef(p6)
+	test.SetNormalOwnerRef(p7)

 	// set pod anti affinity
-	setPodAntiAffinity(p1)
-	setPodAntiAffinity(p3)
-	setPodAntiAffinity(p4)
+	setPodAntiAffinity(p1, "foo", "bar")
+	setPodAntiAffinity(p3, "foo", "bar")
+	setPodAntiAffinity(p4, "foo", "bar")
+	setPodAntiAffinity(p5, "foo1", "bar1")
+	setPodAntiAffinity(p6, "foo1", "bar1")
+	setPodAntiAffinity(p7, "foo", "bar")
+
+	// set pod priority
+	test.SetPodPriority(p5, 100)
+	test.SetPodPriority(p6, 50)
+	test.SetPodPriority(p7, 0)

 	tests := []struct {
 		description string
-		maxPodsToEvict int
+		maxPodsToEvictPerNode int
 		pods []v1.Pod
 		expectedEvictedPodCount int
 	}{
 		{
 			description: "Maximum pods to evict - 0",
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 			pods: []v1.Pod{*p1, *p2, *p3, *p4},
 			expectedEvictedPodCount: 3,
 		},
 		{
 			description: "Maximum pods to evict - 3",
-			maxPodsToEvict: 3,
+			maxPodsToEvictPerNode: 3,
 			pods: []v1.Pod{*p1, *p2, *p3, *p4},
 			expectedEvictedPodCount: 3,
 		},
+		{
+			description: "Evict only 1 pod after sorting",
+			maxPodsToEvictPerNode: 0,
+			pods: []v1.Pod{*p5, *p6, *p7},
+			expectedEvictedPodCount: 1,
+		},
+		{
+			description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
+			maxPodsToEvictPerNode: 1,
+			pods: []v1.Pod{*p1, *nonEvictablePod},
+			expectedEvictedPodCount: 1,
+		},
 	}

 	for _, test := range tests {
 		// create fake client
 		fakeClient := &fake.Clientset{}
 		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-			return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil
+			return true, &v1.PodList{Items: test.pods}, nil
 		})
 		fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
 			return true, node, nil
@@ -82,11 +117,12 @@ func TestPodAntiAffinity(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			test.maxPodsToEvict,
+			test.maxPodsToEvictPerNode,
 			[]*v1.Node{node},
+			false,
 		)

-		RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
+		RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, podEvictor)
 		podsEvicted := podEvictor.TotalEvicted()
 		if podsEvicted != test.expectedEvictedPodCount {
 			t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
@@ -94,7 +130,7 @@ func TestPodAntiAffinity(t *testing.T) {
 		}
 	}

-func setPodAntiAffinity(inputPod *v1.Pod) {
+func setPodAntiAffinity(inputPod *v1.Pod, labelKey, labelValue string) {
 	inputPod.Spec.Affinity = &v1.Affinity{
 		PodAntiAffinity: &v1.PodAntiAffinity{
 			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -102,9 +138,9 @@ func setPodAntiAffinity(inputPod *v1.Pod) {
 				LabelSelector: &metav1.LabelSelector{
 					MatchExpressions: []metav1.LabelSelectorRequirement{
 						{
-							Key: "foo",
+							Key: labelKey,
 							Operator: metav1.LabelSelectorOpIn,
-							Values: []string{"bar"},
+							Values: []string{labelValue},
 						},
 					},
 				},
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/klog"
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
|
func validatePodLifeTimeParams(params *api.StrategyParameters) error {
|
||||||
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
|
if params == nil || params.PodLifeTime == nil || params.PodLifeTime.MaxPodLifeTimeSeconds == nil {
|
||||||
if strategy.Params.MaxPodLifeTimeSeconds == nil {
|
return fmt.Errorf("MaxPodLifeTimeSeconds not set")
|
||||||
klog.V(1).Infof("MaxPodLifeTimeSeconds not set")
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, node := range nodes {
|
if params.PodLifeTime.PodStatusPhases != nil {
|
||||||
klog.V(1).Infof("Processing node: %#v", node.Name)
|
for _, phase := range params.PodLifeTime.PodStatusPhases {
|
||||||
pods := listOldPodsOnNode(ctx, client, node, *strategy.Params.MaxPodLifeTimeSeconds, evictLocalStoragePods)
|
if phase != string(v1.PodPending) && phase != string(v1.PodRunning) {
|
||||||
for _, pod := range pods {
|
return fmt.Errorf("only Pending and Running phases are supported in PodLifeTime")
|
||||||
success, err := podEvictor.EvictPod(ctx, pod, node)
|
|
||||||
if success {
|
|
||||||
klog.V(1).Infof("Evicted pod: %#v because it was created more than %v seconds ago", pod.Name, *strategy.Params.MaxPodLifeTimeSeconds)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
klog.Errorf("Error evicting pod: (%#v)", err)
|
|
||||||
break
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// At most one of include/exclude can be set
|
||||||
|
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
|
||||||
|
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
||||||
|
}
|
||||||
|
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
|
||||||
|
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, maxAge uint, evictLocalStoragePods bool) []*v1.Pod {
|
// PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
|
||||||
pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
|
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
|
||||||
|
if err := validatePodLifeTimeParams(strategy.Params); err != nil {
|
||||||
|
klog.ErrorS(err, "Invalid PodLifeTime parameters")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
|
||||||
|
if err != nil {
|
||||||
|
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var includedNamespaces, excludedNamespaces []string
|
||||||
|
if strategy.Params.Namespaces != nil {
|
||||||
|
includedNamespaces = strategy.Params.Namespaces.Include
|
||||||
|
excludedNamespaces = strategy.Params.Namespaces.Exclude
|
||||||
|
}
|
||||||
|
|
||||||
|
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
|
||||||
|
|
||||||
|
filter := evictable.IsEvictable
|
||||||
|
if strategy.Params.PodLifeTime.PodStatusPhases != nil {
|
||||||
|
filter = func(pod *v1.Pod) bool {
|
||||||
|
for _, phase := range strategy.Params.PodLifeTime.PodStatusPhases {
|
||||||
|
if string(pod.Status.Phase) == phase {
|
||||||
|
return evictable.IsEvictable(pod)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, node := range nodes {
|
||||||
|
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||||
|
|
||||||
|
pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter)
|
||||||
|
for _, pod := range pods {
|
||||||
|
success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
|
||||||
|
if success {
|
||||||
|
klog.V(1).InfoS("Evicted pod because it exceeded its lifetime", "pod", klog.KObj(pod), "maxPodLifeTime", *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, includedNamespaces, excludedNamespaces []string, maxPodLifeTimeSeconds uint, filter func(pod *v1.Pod) bool) []*v1.Pod {
|
||||||
|
pods, err := podutil.ListPodsOnANode(
|
||||||
|
ctx,
|
||||||
|
client,
|
||||||
|
node,
|
||||||
|
podutil.WithFilter(filter),
|
||||||
|
podutil.WithNamespaces(includedNamespaces),
|
||||||
|
podutil.WithoutNamespaces(excludedNamespaces),
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -62,7 +123,7 @@ func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1
|
|||||||
var oldPods []*v1.Pod
|
var oldPods []*v1.Pod
|
||||||
for _, pod := range pods {
|
for _, pod := range pods {
|
||||||
podAgeSeconds := uint(v1meta.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
|
podAgeSeconds := uint(v1meta.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
|
||||||
if podAgeSeconds > maxAge {
|
if podAgeSeconds > maxPodLifeTimeSeconds {
|
||||||
oldPods = append(oldPods, pod)
|
oldPods = append(oldPods, pod)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
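The new PodStatusPhases parameter narrows the evictability filter: the base predicate only runs for pods whose phase is in the configured list, everything else is skipped. A self-contained sketch of that wrapping with stand-in types (not the descheduler's evictions package):

```go
package main

import "fmt"

type pod struct {
	Name  string
	Phase string
}

func main() {
	isEvictable := func(p pod) bool { return true } // base predicate
	phases := []string{"Pending"}                   // stands in for strategy.Params.PodLifeTime.PodStatusPhases

	filter := isEvictable
	if phases != nil {
		// Wrap the base predicate: only pods in one of the listed phases are candidates.
		filter = func(p pod) bool {
			for _, phase := range phases {
				if p.Phase == phase {
					return isEvictable(p)
				}
			}
			return false
		}
	}

	fmt.Println(filter(pod{"p9", "Pending"}))  // true
	fmt.Println(filter(pod{"p10", "Running"})) // false
}
```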
@@ -85,11 +85,24 @@ func TestPodLifeTime(t *testing.T) {
 	p5.ObjectMeta.OwnerReferences = ownerRef4
 	p6.ObjectMeta.OwnerReferences = ownerRef4

+	// Setup two old pods with different status phases
+	p9 := test.BuildTestPod("p9", 100, 0, node.Name, nil)
+	p9.Namespace = "dev"
+	p9.ObjectMeta.CreationTimestamp = olderPodCreationTime
+	p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil)
+	p10.Namespace = "dev"
+	p10.ObjectMeta.CreationTimestamp = olderPodCreationTime
+
+	p9.Status.Phase = "Pending"
+	p10.Status.Phase = "Running"
+	p9.ObjectMeta.OwnerReferences = ownerRef1
+	p10.ObjectMeta.OwnerReferences = ownerRef1
+
 	var maxLifeTime uint = 600
 	testCases := []struct {
 		description string
 		strategy api.DeschedulerStrategy
-		maxPodsToEvict int
+		maxPodsToEvictPerNode int
 		pods []v1.Pod
 		expectedEvictedPodCount int
 	}{
@@ -97,11 +110,11 @@ func TestPodLifeTime(t *testing.T) {
 			description: "Two pods in the `dev` Namespace, 1 is new and 1 very is old. 1 should be evicted.",
 			strategy: api.DeschedulerStrategy{
 				Enabled: true,
-				Params: api.StrategyParameters{
-					MaxPodLifeTimeSeconds: &maxLifeTime,
+				Params: &api.StrategyParameters{
+					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
 				},
 			},
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p1, *p2},
 			expectedEvictedPodCount: 1,
 		},
@@ -109,11 +122,11 @@ func TestPodLifeTime(t *testing.T) {
 			description: "Two pods in the `dev` Namespace, 2 are new and 0 are old. 0 should be evicted.",
 			strategy: api.DeschedulerStrategy{
 				Enabled: true,
-				Params: api.StrategyParameters{
-					MaxPodLifeTimeSeconds: &maxLifeTime,
+				Params: &api.StrategyParameters{
+					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
 				},
 			},
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p3, *p4},
 			expectedEvictedPodCount: 0,
 		},
@@ -121,11 +134,11 @@ func TestPodLifeTime(t *testing.T) {
 			description: "Two pods in the `dev` Namespace, 1 created 605 seconds ago. 1 should be evicted.",
 			strategy: api.DeschedulerStrategy{
 				Enabled: true,
-				Params: api.StrategyParameters{
-					MaxPodLifeTimeSeconds: &maxLifeTime,
+				Params: &api.StrategyParameters{
+					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
 				},
 			},
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p5, *p6},
 			expectedEvictedPodCount: 1,
 		},
@@ -133,14 +146,29 @@ func TestPodLifeTime(t *testing.T) {
 			description: "Two pods in the `dev` Namespace, 1 created 595 seconds ago. 0 should be evicted.",
 			strategy: api.DeschedulerStrategy{
 				Enabled: true,
-				Params: api.StrategyParameters{
-					MaxPodLifeTimeSeconds: &maxLifeTime,
+				Params: &api.StrategyParameters{
+					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
 				},
 			},
-			maxPodsToEvict: 5,
+			maxPodsToEvictPerNode: 5,
 			pods: []v1.Pod{*p7, *p8},
 			expectedEvictedPodCount: 0,
 		},
+		{
+			description: "Two old pods with different status phases. 1 should be evicted.",
+			strategy: api.DeschedulerStrategy{
+				Enabled: true,
+				Params: &api.StrategyParameters{
+					PodLifeTime: &api.PodLifeTime{
+						MaxPodLifeTimeSeconds: &maxLifeTime,
+						PodStatusPhases: []string{"Pending"},
+					},
+				},
+			},
+			maxPodsToEvictPerNode: 5,
+			pods: []v1.Pod{*p9, *p10},
+			expectedEvictedPodCount: 1,
+		},
 	}

 	for _, tc := range testCases {
@@ -155,11 +183,12 @@ func TestPodLifeTime(t *testing.T) {
 			fakeClient,
 			"v1",
 			false,
-			tc.maxPodsToEvict,
+			tc.maxPodsToEvictPerNode,
 			[]*v1.Node{node},
+			false,
 		)

-		PodLifeTime(ctx, fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
+		PodLifeTime(ctx, fakeClient, tc.strategy, []*v1.Node{node}, podEvictor)
 		podsEvicted := podEvictor.TotalEvicted()
 		if podsEvicted != tc.expectedEvictedPodCount {
 			t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
@@ -18,29 +18,69 @@ package strategies

 import (
 	"context"
+	"fmt"

 	v1 "k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"

 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )

+func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
+	if params == nil || params.PodsHavingTooManyRestarts == nil || params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
+		return fmt.Errorf("PodsHavingTooManyRestarts threshold not set")
+	}
+
+	// At most one of include/exclude can be set
+	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
+		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
+	}
+	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
+		return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
+	}
+
+	return nil
+}
+
 // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
 // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
-	if strategy.Params.PodsHavingTooManyRestarts == nil || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
-		klog.V(1).Infof("PodsHavingTooManyRestarts thresholds not set")
+func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+	if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
+		klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
 		return
 	}

+	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
+	if err != nil {
+		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
+		return
+	}
+
+	var includedNamespaces, excludedNamespaces []string
+	if strategy.Params.Namespaces != nil {
+		includedNamespaces = strategy.Params.Namespaces.Include
+		excludedNamespaces = strategy.Params.Namespaces.Exclude
+	}
+
+	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
+
 	for _, node := range nodes {
-		klog.V(1).Infof("Processing node: %s", node.Name)
-		pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
+		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
+		pods, err := podutil.ListPodsOnANode(
+			ctx,
+			client,
+			node,
+			podutil.WithFilter(evictable.IsEvictable),
+			podutil.WithNamespaces(includedNamespaces),
+			podutil.WithoutNamespaces(excludedNamespaces),
+		)
 		if err != nil {
-			klog.Errorf("Error when list pods at node %s", node.Name)
+			klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))
 			continue
 		}

@@ -53,8 +93,8 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
 			} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
 				continue
 			}
-			if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
-				klog.Errorf("Error evicting pod: (%#v)", err)
+			if _, err := podEvictor.EvictPod(ctx, pods[i], node, "TooManyRestarts"); err != nil {
+				klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 				break
 			}
 		}
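This strategy compares the sum of container restart counts (optionally including init containers) against PodRestartThreshold. A sketch of that accounting using the real core/v1 types; this is illustrative only, not the descheduler's own implementation:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// totalRestarts sums restart counts across regular containers and,
// when requested, init containers as well.
func totalRestarts(pod *v1.Pod, includingInitContainers bool) int32 {
	var restarts int32
	for _, cs := range pod.Status.ContainerStatuses {
		restarts += cs.RestartCount
	}
	if includingInitContainers {
		for _, cs := range pod.Status.InitContainerStatuses {
			restarts += cs.RestartCount
		}
	}
	return restarts
}

func main() {
	p := &v1.Pod{Status: v1.PodStatus{
		ContainerStatuses:     []v1.ContainerStatus{{RestartCount: 20}},
		InitContainerStatuses: []v1.ContainerStatus{{RestartCount: 5}},
	}}
	threshold := int32(25) // stands in for PodRestartThreshold
	fmt.Println(totalRestarts(p, true) >= threshold)  // true
	fmt.Println(totalRestarts(p, false) >= threshold) // false
}
```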
@@ -84,7 +84,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 	createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32) api.DeschedulerStrategy {
 		return api.DeschedulerStrategy{
 			Enabled: enabled,
-			Params: api.StrategyParameters{
+			Params: &api.StrategyParameters{
 				PodsHavingTooManyRestarts: &api.PodsHavingTooManyRestarts{
 					PodRestartThreshold: restartThresholds,
 					IncludingInitContainers: includingInitContainers,
@@ -98,61 +98,61 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 		pods []v1.Pod
 		strategy api.DeschedulerStrategy
 		expectedEvictedPodCount int
-		maxPodsToEvict int
+		maxPodsToEvictPerNode int
 	}{
 		{
 			description: "All pods have total restarts under threshold, no pod evictions",
 			strategy: createStrategy(true, true, 10000),
 			expectedEvictedPodCount: 0,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Some pods have total restarts bigger than threshold",
 			strategy: createStrategy(true, true, 1),
 			expectedEvictedPodCount: 6,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pods evictions",
 			strategy: createStrategy(true, true, 1*25),
 			expectedEvictedPodCount: 6,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pods evictions",
 			strategy: createStrategy(true, false, 1*25),
 			expectedEvictedPodCount: 5,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pods evictions",
 			strategy: createStrategy(true, true, 1*20),
 			expectedEvictedPodCount: 6,
-			maxPodsToEvict: 0,
+			maxPodsToEvictPerNode: 0,
 		},
 		{
 			description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pods evictions",
|
||||||
strategy: createStrategy(true, false, 1*20),
|
strategy: createStrategy(true, false, 1*20),
|
||||||
expectedEvictedPodCount: 6,
|
expectedEvictedPodCount: 6,
|
||||||
maxPodsToEvict: 0,
|
maxPodsToEvictPerNode: 0,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
|
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
|
||||||
strategy: createStrategy(true, true, 5*25+1),
|
strategy: createStrategy(true, true, 5*25+1),
|
||||||
expectedEvictedPodCount: 1,
|
expectedEvictedPodCount: 1,
|
||||||
maxPodsToEvict: 0,
|
maxPodsToEvictPerNode: 0,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
|
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
|
||||||
strategy: createStrategy(true, false, 5*20+1),
|
strategy: createStrategy(true, false, 5*20+1),
|
||||||
expectedEvictedPodCount: 1,
|
expectedEvictedPodCount: 1,
|
||||||
maxPodsToEvict: 0,
|
maxPodsToEvictPerNode: 0,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
description: "All pods have total restarts equals threshold(maxPodsToEvict=3), 3 pods evictions",
|
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pods evictions",
|
||||||
strategy: createStrategy(true, true, 1),
|
strategy: createStrategy(true, true, 1),
|
||||||
expectedEvictedPodCount: 3,
|
expectedEvictedPodCount: 3,
|
||||||
maxPodsToEvict: 3,
|
maxPodsToEvictPerNode: 3,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -169,11 +169,12 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
|||||||
fakeClient,
|
fakeClient,
|
||||||
"v1",
|
"v1",
|
||||||
false,
|
false,
|
||||||
tc.maxPodsToEvict,
|
tc.maxPodsToEvictPerNode,
|
||||||
[]*v1.Node{node},
|
[]*v1.Node{node},
|
||||||
|
false,
|
||||||
)
|
)
|
||||||
|
|
||||||
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
|
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, []*v1.Node{node}, podEvictor)
|
||||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||||
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||||
|
|||||||
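The `1*20` / `1*25` and `5*20+1` / `5*25+1` thresholds in the table above suggest that each fixture pod reports 20 restarts across its regular containers and 25 once init-container restarts are counted. A hedged sketch of how such a total is presumably derived from pod status follows; the helper name and the fixture numbers are inferences, not the repository's code.

```go
// Illustrative only: sum restart counts from a pod's status, optionally including init containers.
func totalRestarts(pod *v1.Pod, includingInitContainers bool) int32 {
	var restarts int32
	for _, cs := range pod.Status.ContainerStatuses {
		restarts += cs.RestartCount
	}
	if includingInitContainers {
		for _, cs := range pod.Status.InitContainerStatuses {
			restarts += cs.RestartCount
		}
	}
	return restarts
}
```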
pkg/descheduler/strategies/topologyspreadconstraint.go (new file, 381 lines)
@@ -0,0 +1,381 @@
```go
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
	"context"
	"fmt"
	"math"
	"sort"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	"sigs.k8s.io/descheduler/pkg/utils"
)

// AntiAffinityTerm's topology key value used in predicate metadata
type topologyPair struct {
	key   string
	value string
}

type topology struct {
	pair topologyPair
	pods []*v1.Pod
}

func validateAndParseTopologySpreadParams(ctx context.Context, client clientset.Interface, params *api.StrategyParameters) (int32, sets.String, sets.String, error) {
	var includedNamespaces, excludedNamespaces sets.String
	if params == nil {
		return 0, includedNamespaces, excludedNamespaces, nil
	}
	// At most one of include/exclude can be set
	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return 0, includedNamespaces, excludedNamespaces, fmt.Errorf("only one of Include/Exclude namespaces can be set")
	}
	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
		return 0, includedNamespaces, excludedNamespaces, fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
	}
	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
	if err != nil {
		return 0, includedNamespaces, excludedNamespaces, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
	}
	if params.Namespaces != nil {
		includedNamespaces = sets.NewString(params.Namespaces.Include...)
		excludedNamespaces = sets.NewString(params.Namespaces.Exclude...)
	}

	return thresholdPriority, includedNamespaces, excludedNamespaces, nil
}
```
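Illustrative use of the validation above, assuming a `clientset.Interface` named `client` is in scope; the namespace names are made up. Setting both `Include` and `Exclude` trips the first error path.

```go
params := &api.StrategyParameters{
	Namespaces: &api.Namespaces{
		Include: []string{"team-a"}, // illustrative namespace names
		Exclude: []string{"kube-system"},
	},
}
if _, _, _, err := validateAndParseTopologySpreadParams(context.TODO(), client, params); err != nil {
	// err: "only one of Include/Exclude namespaces can be set"
	klog.ErrorS(err, "misconfigured strategy")
}
```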
```go
func RemovePodsViolatingTopologySpreadConstraint(
	ctx context.Context,
	client clientset.Interface,
	strategy api.DeschedulerStrategy,
	nodes []*v1.Node,
	podEvictor *evictions.PodEvictor,
) {
	thresholdPriority, includedNamespaces, excludedNamespaces, err := validateAndParseTopologySpreadParams(ctx, client, strategy.Params)
	if err != nil {
		klog.ErrorS(err, "Invalid RemovePodsViolatingTopologySpreadConstraint parameters")
		return
	}

	nodeMap := make(map[string]*v1.Node, len(nodes))
	for _, node := range nodes {
		nodeMap[node.Name] = node
	}
	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

	// 1. for each namespace for which there is Topology Constraint
	// 2. for each TopologySpreadConstraint in that namespace
	//    { find all evictable pods in that namespace
	//    { 3. for each evictable pod in that namespace
	// 4. If the pod matches this TopologySpreadConstraint LabelSelector
	// 5. If the pod nodeName is present in the nodeMap
	// 6. create a topoPair with key as this TopologySpreadConstraint.TopologyKey and value as this pod's Node Label Value for this TopologyKey
	// 7. add the pod with key as this topoPair
	// 8. find the min number of pods in any topoPair for this topologyKey
	//    iterate through all topoPairs for this topologyKey and diff currentPods -minPods <=maxSkew
	//    if diff > maxSkew, add this pod in the current bucket for eviction

	// First record all of the constraints by namespace
	namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
	if err != nil {
		klog.ErrorS(err, "Couldn't list namespaces")
		return
	}
	klog.V(1).InfoS("Processing namespaces for topology spread constraints")
	podsForEviction := make(map[*v1.Pod]struct{})
	// 1. for each namespace...
	for _, namespace := range namespaces.Items {
		if (len(includedNamespaces) > 0 && !includedNamespaces.Has(namespace.Name)) ||
			(len(excludedNamespaces) > 0 && excludedNamespaces.Has(namespace.Name)) {
			continue
		}
		namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
		if err != nil {
			klog.ErrorS(err, "Couldn't list pods in namespace", "namespace", namespace)
			continue
		}

		// ...where there is a topology constraint
		//namespaceTopologySpreadConstrainPods := make([]v1.Pod, 0, len(namespacePods.Items))
		namespaceTopologySpreadConstraints := make(map[v1.TopologySpreadConstraint]struct{})
		for _, pod := range namespacePods.Items {
			for _, constraint := range pod.Spec.TopologySpreadConstraints {
				// Only deal with hard topology constraints
				// TODO(@damemi): add support for soft constraints
				if constraint.WhenUnsatisfiable != v1.DoNotSchedule {
					continue
				}
				namespaceTopologySpreadConstraints[constraint] = struct{}{}
			}
		}
		if len(namespaceTopologySpreadConstraints) == 0 {
			continue
		}

		// 2. for each topologySpreadConstraint in that namespace
		for constraint := range namespaceTopologySpreadConstraints {
			constraintTopologies := make(map[topologyPair][]*v1.Pod)
			// pre-populate the topologyPair map with all the topologies available from the nodeMap
			// (we can't just build it from existing pods' nodes because a topology may have 0 pods)
			for _, node := range nodeMap {
				if val, ok := node.Labels[constraint.TopologyKey]; ok {
					constraintTopologies[topologyPair{key: constraint.TopologyKey, value: val}] = make([]*v1.Pod, 0)
				}
			}

			selector, err := metav1.LabelSelectorAsSelector(constraint.LabelSelector)
			if err != nil {
				klog.ErrorS(err, "Couldn't parse label selector as selector", "selector", constraint.LabelSelector)
				continue
			}

			// 3. for each evictable pod in that namespace
			// (this loop is where we count the number of pods per topologyValue that match this constraint's selector)
			var sumPods float64
			for i := range namespacePods.Items {
				// 4. if the pod matches this TopologySpreadConstraint LabelSelector
				if !selector.Matches(labels.Set(namespacePods.Items[i].Labels)) {
					continue
				}

				// 5. If the pod's node matches this constraint's topologyKey, create a topoPair and add the pod
				node, ok := nodeMap[namespacePods.Items[i].Spec.NodeName]
				if !ok {
					// If ok is false, node is nil and node.Labels would panic; the pod has not been scheduled yet, so it's safe to just continue here.
					continue
				}
				nodeValue, ok := node.Labels[constraint.TopologyKey]
				if !ok {
					continue
				}
				// 6. create a topoPair with key as this TopologySpreadConstraint
				topoPair := topologyPair{key: constraint.TopologyKey, value: nodeValue}
				// 7. add the pod with key as this topoPair
				constraintTopologies[topoPair] = append(constraintTopologies[topoPair], &namespacePods.Items[i])
				sumPods++
			}
			if topologyIsBalanced(constraintTopologies, constraint) {
				klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
				continue
			}
			balanceDomains(podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodeMap)
		}
	}

	for pod := range podsForEviction {
		if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodTopologySpread"); err != nil && !evictable.IsEvictable(pod) {
			klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
			break
		}
	}
}
```
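A small standalone sketch of the constraint-gathering filter in the function above: only hard (`DoNotSchedule`) constraints are recorded, while soft (`ScheduleAnyway`) constraints fall through the `continue`, per the TODO. This fragment is illustrative and not part of the file.

```go
namespaceTopologySpreadConstraints := map[v1.TopologySpreadConstraint]struct{}{}
hard := v1.TopologySpreadConstraint{MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.DoNotSchedule}
soft := v1.TopologySpreadConstraint{MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.ScheduleAnyway}
for _, c := range []v1.TopologySpreadConstraint{hard, soft} {
	if c.WhenUnsatisfiable != v1.DoNotSchedule {
		continue // soft constraints are skipped for now (see the TODO above)
	}
	namespaceTopologySpreadConstraints[c] = struct{}{}
}
// len(namespaceTopologySpreadConstraints) == 1: only the DoNotSchedule constraint is kept
```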
```go
// topologyIsBalanced checks if any domains in the topology differ by more than the MaxSkew
// this is called before any sorting or other calculations and is used to skip topologies that don't need to be balanced
func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.TopologySpreadConstraint) bool {
	minDomainSize := math.MaxInt32
	maxDomainSize := math.MinInt32
	for _, pods := range topology {
		if len(pods) < minDomainSize {
			minDomainSize = len(pods)
		}
		if len(pods) > maxDomainSize {
			maxDomainSize = len(pods)
		}
		if int32(maxDomainSize-minDomainSize) > constraint.MaxSkew {
			return false
		}
	}
	return true
}
```
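A standalone sketch of the same balance check using plain counts instead of pod slices, to make the skip condition concrete (this mirrors the logic; it is not repository code):

```go
// isBalanced reports whether the largest and smallest domain differ by at most maxSkew.
func isBalanced(sizes []int, maxSkew int) bool {
	if len(sizes) == 0 {
		return true
	}
	minSize, maxSize := sizes[0], sizes[0]
	for _, n := range sizes {
		if n < minSize {
			minSize = n
		}
		if n > maxSize {
			maxSize = n
		}
	}
	return maxSize-minSize <= maxSkew
}

// isBalanced([]int{2, 1}, 1) == true  -> constraint skipped, nothing to evict
// isBalanced([]int{3, 1}, 1) == false -> balanceDomains runs
```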
```go
// balanceDomains determines how many pods (minimum) should be evicted from large domains to achieve an ideal balance within maxSkew
// To actually determine how many pods need to be moved, we sort the topology domains in ascending length
// [2, 5, 3, 8, 5, 7]
//
// Would end up like:
// [2, 3, 5, 5, 7, 8]
//
// We then start at i=[0] and j=[len(list)-1] and compare the 2 topology sizes.
// If the diff of the size of the domains is more than the maxSkew, we will move up to half that skew,
// or the available pods from the higher domain, or the number required to bring the smaller domain up to the average,
// whichever number is less.
//
// (Note, we will only move as many pods from a domain as possible without bringing it below the ideal average,
// and we will not bring any smaller domain above the average)
// If the diff is within the skew, we move to the next highest domain.
// If the higher domain can't give any more without falling below the average, we move to the next lowest "high" domain
//
// Following this, the above topology domains end up "sorted" as:
// [5, 5, 5, 5, 5, 5]
// (assuming even distribution by the scheduler of the evicted pods)
func balanceDomains(
	podsForEviction map[*v1.Pod]struct{},
	constraint v1.TopologySpreadConstraint,
	constraintTopologies map[topologyPair][]*v1.Pod,
	sumPods float64,
	isEvictable func(*v1.Pod) bool,
	nodeMap map[string]*v1.Node) {
	idealAvg := sumPods / float64(len(constraintTopologies))
	sortedDomains := sortDomains(constraintTopologies, isEvictable)
	// i is the index for belowOrEqualAvg
	// j is the index for aboveAvg
	i := 0
	j := len(sortedDomains) - 1
	for i < j {
		// if j has no more to give without falling below the ideal average, move to next aboveAvg
		if float64(len(sortedDomains[j].pods)) < idealAvg {
			j--
		}

		// skew = actual difference between the domains
		skew := float64(len(sortedDomains[j].pods) - len(sortedDomains[i].pods))

		// if i and j are within the maxSkew of each other, move to next belowOrEqualAvg
		if int32(skew) <= constraint.MaxSkew {
			i++
			continue
		}

		// the most that can be given from aboveAvg is:
		// 1. up to half the distance between them, minus MaxSkew, rounded up
		// 2. how many it has remaining without falling below the average rounded up, or
		// 3. how many can be added without bringing the smaller domain above the average rounded up,
		// whichever is less
		// (This is the basic principle of keeping all sizes within ~skew of the average)
		aboveAvg := math.Ceil(float64(len(sortedDomains[j].pods)) - idealAvg)
		belowAvg := math.Ceil(idealAvg - float64(len(sortedDomains[i].pods)))
		smallestDiff := math.Min(aboveAvg, belowAvg)
		halfSkew := math.Ceil((skew - float64(constraint.MaxSkew)) / 2)
		movePods := int(math.Min(smallestDiff, halfSkew))
		if movePods <= 0 {
			i++
			continue
		}

		// remove pods from the higher topology and add them to the list of pods to be evicted
		// also (just for tracking), add them to the list of pods in the lower topology
		aboveToEvict := sortedDomains[j].pods[len(sortedDomains[j].pods)-movePods:]
		for k := range aboveToEvict {
			// if the pod has a hard nodeAffinity or nodeSelector that only matches this node,
			// don't bother evicting it as it will just end up back on the same node
			// however we still account for it "being evicted" so the algorithm can complete
			// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
			// but still try to get the required # of movePods (instead of just chopping that value off the slice above)
			if aboveToEvict[k].Spec.NodeSelector != nil ||
				(aboveToEvict[k].Spec.Affinity != nil &&
					aboveToEvict[k].Spec.Affinity.NodeAffinity != nil &&
					aboveToEvict[k].Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
					nodesPodFitsOnBesidesCurrent(aboveToEvict[k], nodeMap) == 0) {
				klog.V(2).InfoS("Ignoring pod for eviction due to node selector/affinity", "pod", klog.KObj(aboveToEvict[k]))
				continue
			}
			podsForEviction[aboveToEvict[k]] = struct{}{}
		}
		sortedDomains[j].pods = sortedDomains[j].pods[:len(sortedDomains[j].pods)-movePods]
		sortedDomains[i].pods = append(sortedDomains[i].pods, aboveToEvict...)
	}
}
```
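A self-contained program that reproduces the arithmetic above for the "[0, 1, 100], maxSkew=1" unit test added in this change: two passes move 34 and then 32 pods, 66 in total. It mirrors the math only and is not the repository's code.

```go
package main

import (
	"fmt"
	"math"
)

// movePods mirrors balanceDomains' arithmetic for one (small, large) domain pair.
func movePods(small, large, idealAvg float64, maxSkew int32) int {
	skew := large - small
	if int32(skew) <= maxSkew {
		return 0
	}
	aboveAvg := math.Ceil(large - idealAvg)                // room above the average
	belowAvg := math.Ceil(idealAvg - small)                // room below the average
	halfSkew := math.Ceil((skew - float64(maxSkew)) / 2)   // half the excess skew
	return int(math.Min(math.Min(aboveAvg, belowAvg), halfSkew))
}

func main() {
	idealAvg := 101.0 / 3.0 // domains [0, 1, 100] -> 33.67 pods per domain on average
	first := movePods(0, 100, idealAvg, 1)                  // min(67, 34, 50) = 34
	second := movePods(1, 100-float64(first), idealAvg, 1)  // min(33, 33, 32) = 32
	fmt.Println(first, second, first+second)                // 34 32 66
}
```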
```go
// nodesPodFitsOnBesidesCurrent counts the number of nodes this pod could fit on based on its affinity
// It excludes the current node because, for the sake of domain balancing only, we care about if there is any other
// place it could theoretically fit.
// If the pod doesn't fit on its current node, that is a job for RemovePodsViolatingNodeAffinity, and irrelevant to Topology Spreading
func nodesPodFitsOnBesidesCurrent(pod *v1.Pod, nodeMap map[string]*v1.Node) int {
	count := 0
	for _, node := range nodeMap {
		if nodeutil.PodFitsCurrentNode(pod, node) && node != nodeMap[pod.Spec.NodeName] {
			count++
		}
	}
	return count
}
```
```go
// sortDomains sorts and splits the list of topology domains based on their size
// it also sorts the list of pods within the domains based on their node affinity/selector and priority in the following order:
// 1. non-evictable pods
// 2. pods with selectors or affinity
// 3. pods in descending priority
// 4. all other pods
// We then pop pods off the back of the list for eviction
func sortDomains(constraintTopologyPairs map[topologyPair][]*v1.Pod, isEvictable func(*v1.Pod) bool) []topology {
	sortedTopologies := make([]topology, 0, len(constraintTopologyPairs))
	// sort the topologies and return 2 lists: those <= the average and those > the average (> list inverted)
	for pair, list := range constraintTopologyPairs {
		// Sort the pods within the domain so that the lowest priority pods are considered first for eviction,
		// followed by the highest priority,
		// followed by the lowest priority pods with affinity or nodeSelector,
		// followed by the highest priority pods with affinity or nodeSelector
		sort.Slice(list, func(i, j int) bool {
			// any non-evictable pods should be considered last (ie, first in the list)
			if !isEvictable(list[i]) || !isEvictable(list[j]) {
				// false - i is the only non-evictable, so return true to put it first
				// true - j is non-evictable, so return false to put j before i
				// if both are non-evictable, order doesn't matter
				return !(isEvictable(list[i]) && !isEvictable(list[j]))
			}

			// if both pods have selectors/affinity, compare them by their priority
			if hasSelectorOrAffinity(*list[i]) == hasSelectorOrAffinity(*list[j]) {
				return comparePodsByPriority(list[i], list[j])
			}
			return hasSelectorOrAffinity(*list[i]) && !hasSelectorOrAffinity(*list[j])
		})
		sortedTopologies = append(sortedTopologies, topology{pair: pair, pods: list})
	}

	// create an ascending slice of all key-value topology pairs
	sort.Slice(sortedTopologies, func(i, j int) bool {
		return len(sortedTopologies[i].pods) < len(sortedTopologies[j].pods)
	})

	return sortedTopologies
}

func hasSelectorOrAffinity(pod v1.Pod) bool {
	return pod.Spec.NodeSelector != nil || (pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil)
}

// comparePodsByPriority is a helper to the sort function to compare 2 pods based on their priority values
// It will sort the pods in DESCENDING order of priority, since in our logic we evict pods from the back
// of the list first.
func comparePodsByPriority(iPod, jPod *v1.Pod) bool {
	if iPod.Spec.Priority != nil && jPod.Spec.Priority != nil {
		// a LOWER priority value should be evicted FIRST
		return *iPod.Spec.Priority > *jPod.Spec.Priority
	} else if iPod.Spec.Priority != nil && jPod.Spec.Priority == nil {
		// i should come before j
		return true
	} else if iPod.Spec.Priority == nil && jPod.Spec.Priority != nil {
		// j should come before i
		return false
	} else {
		// it doesn't matter. just return true
		return true
	}
}
```
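A standalone demo of the descending-priority ordering that comparePodsByPriority produces, using bare priority values in place of pods (nil stands for a pod with no priority set); eviction candidates are taken from the back of the sorted slice, so the lowest-priority pod is reached first. This is a sketch of the ordering only, not repository code.

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	p := func(v int32) *int32 { return &v }
	priorities := []*int32{p(100), nil, p(2000), p(10)}
	sort.Slice(priorities, func(i, j int) bool {
		a, b := priorities[i], priorities[j]
		switch {
		case a != nil && b != nil:
			return *a > *b // descending: lower priorities sink to the back
		case a != nil:
			return true // a set priority sorts before an unset one
		default:
			return false
		}
	})
	for _, v := range priorities {
		if v == nil {
			fmt.Print("nil ")
			continue
		}
		fmt.Print(*v, " ")
	}
	fmt.Println() // 2000 100 10 nil
}
```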
pkg/descheduler/strategies/topologyspreadconstraint_test.go (new file, 410 lines)
@@ -0,0 +1,410 @@
```go
package strategies

import (
	"context"
	"fmt"
	"sigs.k8s.io/descheduler/pkg/api"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/test"
)

func TestTopologySpreadConstraint(t *testing.T) {
	ctx := context.Background()
	testCases := []struct {
		name                 string
		pods                 []*v1.Pod
		expectedEvictedCount int
		nodes                []*v1.Node
		strategy             api.DeschedulerStrategy
		namespaces           []string
	}{
		{
			name: "2 domains, sizes [2,1], maxSkew=1, move 0 pods",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}, constraints: []v1.TopologySpreadConstraint{
					{MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}},
				}},
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}},
				{count: 1, node: "n2", labels: map[string]string{"foo": "bar"}},
			}),
			expectedEvictedCount: 0,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}, constraints: []v1.TopologySpreadConstraint{
					{MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}},
				}},
				{count: 2, node: "n1", labels: map[string]string{"foo": "bar"}},
				{count: 1, node: "n2", labels: map[string]string{"foo": "bar"}},
			}),
			expectedEvictedCount: 1,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2], exclude kube-system namespace",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}, constraints: []v1.TopologySpreadConstraint{
					{MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}},
				}},
				{count: 2, node: "n1", labels: map[string]string{"foo": "bar"}},
				{count: 1, node: "n2", labels: map[string]string{"foo": "bar"}},
			}),
			expectedEvictedCount: 1,
			strategy:             api.DeschedulerStrategy{Enabled: true, Params: &api.StrategyParameters{Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}}}},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains, sizes [5,2], maxSkew=1, move 1 pod to achieve [4,3]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}, constraints: []v1.TopologySpreadConstraint{
					{MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}},
				}},
				{count: 4, node: "n1", labels: map[string]string{"foo": "bar"}},
				{count: 2, node: "n2", labels: map[string]string{"foo": "bar"}},
			}),
			expectedEvictedCount: 1,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods to achieve [2,2]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}, constraints: []v1.TopologySpreadConstraint{
					{MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}},
				}},
				{count: 3, node: "n1", labels: map[string]string{"foo": "bar"}},
			}),
			expectedEvictedCount: 2,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains, sizes [4,0], maxSkew=1, only move 1 pod since pods with nodeSelector and nodeAffinity aren't evicted",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}, constraints: []v1.TopologySpreadConstraint{
					{MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}},
				}, nodeSelector: map[string]string{"zone": "zoneA"}},
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}, nodeSelector: map[string]string{"zone": "zoneA"}},
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}, nodeAffinity: &v1.Affinity{NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
						{MatchExpressions: []v1.NodeSelectorRequirement{{Key: "foo", Values: []string{"bar"}, Operator: v1.NodeSelectorOpIn}}},
					}},
				}}},
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}},
			}),
			expectedEvictedCount: 1,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "3 domains, sizes [0, 1, 100], maxSkew=1, move 66 pods to get [34, 33, 34]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
				test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
			},
			pods: createTestPods([]testPodList{
				{count: 1, node: "n2", labels: map[string]string{"foo": "bar"}, constraints: []v1.TopologySpreadConstraint{
					{MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}},
				}},
				{count: 100, node: "n3", labels: map[string]string{"foo": "bar"}},
			}),
			expectedEvictedCount: 66,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "4 domains, sizes [0, 1, 3, 5], should move 3 to get [2, 2, 3, 2]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
				test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
				test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneD" }),
			},
			pods: createTestPods([]testPodList{
				{count: 1, node: "n2", labels: map[string]string{"foo": "bar"}, constraints: []v1.TopologySpreadConstraint{
					{MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}},
				}},
				{count: 3, node: "n3", labels: map[string]string{"foo": "bar"}},
				{count: 5, node: "n4", labels: map[string]string{"foo": "bar"}},
			}),
			expectedEvictedCount: 3,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains size [2 6], maxSkew=2, should move 1 to get [3 5]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}, constraints: []v1.TopologySpreadConstraint{
					{MaxSkew: 2, TopologyKey: "zone", WhenUnsatisfiable: v1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}},
				}},
				{count: 1, node: "n1", labels: map[string]string{"foo": "bar"}},
				{count: 6, node: "n2", labels: map[string]string{"foo": "bar"}},
			}),
			expectedEvictedCount: 1,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			fakeClient := &fake.Clientset{}
			fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
				podList := make([]v1.Pod, 0, len(tc.pods))
				for _, pod := range tc.pods {
					podList = append(podList, *pod)
				}
				return true, &v1.PodList{Items: podList}, nil
			})
			fakeClient.Fake.AddReactor("list", "namespaces", func(action core.Action) (bool, runtime.Object, error) {
				return true, &v1.NamespaceList{Items: []v1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: "ns1", Namespace: "ns1"}}}}, nil
			})

			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				"v1",
				false,
				100,
				tc.nodes,
				false,
			)
			RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
			podsEvicted := podEvictor.TotalEvicted()
			if podsEvicted != tc.expectedEvictedCount {
				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
			}
		})
	}
}

type testPodList struct {
	count        int
	node         string
	labels       map[string]string
	constraints  []v1.TopologySpreadConstraint
	nodeSelector map[string]string
	nodeAffinity *v1.Affinity
}

func createTestPods(testPods []testPodList) []*v1.Pod {
	ownerRef1 := test.GetReplicaSetOwnerRefList()
	pods := make([]*v1.Pod, 0)
	podNum := 0
	for _, tp := range testPods {
		for i := 0; i < tp.count; i++ {
			pods = append(pods,
				test.BuildTestPod(fmt.Sprintf("pod-%d", podNum), 100, 0, tp.node, func(p *v1.Pod) {
					p.Labels = make(map[string]string)
					p.Labels = tp.labels
					p.Namespace = "ns1"
					p.ObjectMeta.OwnerReferences = ownerRef1
					p.Spec.TopologySpreadConstraints = tp.constraints
					p.Spec.NodeSelector = tp.nodeSelector
					p.Spec.Affinity = tp.nodeAffinity
				}))
			podNum++
		}
	}
	return pods
}
```
```diff
@@ -7,7 +7,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/component-base/featuregate"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )
 
 const (
@@ -104,23 +104,21 @@ func GetPodSource(pod *v1.Pod) (string, error) {
 	return "", fmt.Errorf("cannot get source of pod %q", pod.UID)
 }
 
-// IsCriticalPod returns true if pod's priority is greater than or equal to SystemCriticalPriority.
+// IsCriticalPod returns true if the pod is a static or mirror pod.
 func IsCriticalPod(pod *v1.Pod) bool {
 	if IsStaticPod(pod) {
 		return true
 	}
 
 	if IsMirrorPod(pod) {
 		return true
 	}
-	if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
+	if pod.Spec.Priority != nil && *pod.Spec.Priority >= SystemCriticalPriority {
 		return true
 	}
-	return false
-}
-
-// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
-func IsCriticalPodBasedOnPriority(priority int32) bool {
-	return priority >= SystemCriticalPriority
+
+	return false
 }
 
 // PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
@@ -188,7 +186,7 @@ func PodToleratesTaints(pod *v1.Pod, taintsOfNodes map[string][]v1.Taint) bool {
 		if len(pod.Spec.Tolerations) >= len(taintsForNode) && TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taintsForNode, nil) {
 			return true
 		}
-		klog.V(5).Infof("pod: %#v doesn't tolerate node %s's taints", pod.Name, nodeName)
+		klog.V(5).InfoS("Pod doesn't tolerate nodes taint", "pod", klog.KObj(pod), "nodeName", nodeName)
 	}
 
 	return false
```
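For context on the inlined check above: 2 * 10^9 is the priority value Kubernetes assigns to the built-in system-cluster-critical and system-node-critical priority classes, so pods in those classes clear the threshold. The fragment below is illustrative only and assumes a `*v1.Pod` named `pod` plus the imports used elsewhere in this diff.

```go
// A pod carrying the system-critical priority value is treated as critical
// by the eviction filter after this change.
criticalPriority := int32(2 * 1000000000)
pod.Spec.Priority = &criticalPriority
// utils.IsCriticalPod(pod) now returns true, so the descheduler's filter protects it.
```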
```diff
@@ -21,8 +21,8 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/selection"
-	"k8s.io/klog"
+	"k8s.io/component-helpers/scheduling/corev1"
+	"k8s.io/klog/v2"
 )
 
 // The following code has been copied from predicates package to avoid the
@@ -69,64 +69,17 @@ func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
 
 	// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
 	if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
-		nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
-		klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
-		return nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
+		klog.V(10).InfoS("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector", "selector", nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
+		matches, err := corev1.MatchNodeSelectorTerms(node, nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
+		if err != nil {
+			klog.ErrorS(err, "error parsing node selector", "selector", nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
+		}
+		return matches
 	}
 	return true
 }
 
-// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
-// terms are ORed, and an empty list of terms will match nothing.
-func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
-	for _, req := range nodeSelectorTerms {
-		nodeSelector, err := NodeSelectorRequirementsAsSelector(req.MatchExpressions)
-		if err != nil {
-			klog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
-			return false
-		}
-		if nodeSelector.Matches(labels.Set(node.Labels)) {
-			return true
-		}
-	}
-	return false
-}
-
-// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
-// labels.Selector.
-func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) {
-	if len(nsm) == 0 {
-		return labels.Nothing(), nil
-	}
-	selector := labels.NewSelector()
-	for _, expr := range nsm {
-		var op selection.Operator
-		switch expr.Operator {
-		case v1.NodeSelectorOpIn:
-			op = selection.In
-		case v1.NodeSelectorOpNotIn:
-			op = selection.NotIn
-		case v1.NodeSelectorOpExists:
-			op = selection.Exists
-		case v1.NodeSelectorOpDoesNotExist:
-			op = selection.DoesNotExist
-		case v1.NodeSelectorOpGt:
-			op = selection.GreaterThan
-		case v1.NodeSelectorOpLt:
-			op = selection.LessThan
-		default:
-			return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
-		}
-		r, err := labels.NewRequirement(expr.Key, op, expr.Values)
-		if err != nil {
-			return nil, err
-		}
-		selector = selector.Add(*r)
-	}
-	return selector, nil
-}
-
 // TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations.
 func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool {
 	for i := range tolerations {
```
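The replacement relies on `corev1.MatchNodeSelectorTerms`, which keeps the semantics the removed helper documented: terms are ORed, and the requirements inside a single term are ANDed. A short illustrative fragment, assuming a `*v1.Node` named `node` labeled `zone=zoneA` is in scope:

```go
selector := &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
	{MatchExpressions: []v1.NodeSelectorRequirement{{Key: "zone", Operator: v1.NodeSelectorOpIn, Values: []string{"zoneA"}}}},
	{MatchExpressions: []v1.NodeSelectorRequirement{{Key: "disk", Operator: v1.NodeSelectorOpIn, Values: []string{"ssd"}}}},
}}
matches, err := corev1.MatchNodeSelectorTerms(node, selector)
// matches == true: the first term matches the node even though the second does not
_ = err
```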
```diff
@@ -1,9 +1,15 @@
 package utils
 
 import (
+	"context"
+	"fmt"
+
 	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
+	clientset "k8s.io/client-go/kubernetes"
+	"sigs.k8s.io/descheduler/pkg/api"
 )
 
 const SystemCriticalPriority = 2 * int32(1000000000)
@@ -33,3 +39,36 @@ func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, se
 	}
 	return true
 }
+
+// GetPriorityFromPriorityClass gets priority from the given priority class.
+// If no priority class is provided, it will return SystemCriticalPriority by default.
+func GetPriorityFromPriorityClass(ctx context.Context, client clientset.Interface, name string) (int32, error) {
+	if name != "" {
+		priorityClass, err := client.SchedulingV1().PriorityClasses().Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return 0, err
+		}
+		return priorityClass.Value, nil
+	}
+	return SystemCriticalPriority, nil
+}
+
+// GetPriorityFromStrategyParams gets priority from the given StrategyParameters.
+// It will return SystemCriticalPriority by default.
+func GetPriorityFromStrategyParams(ctx context.Context, client clientset.Interface, params *api.StrategyParameters) (priority int32, err error) {
+	if params == nil {
+		return SystemCriticalPriority, nil
+	}
+	if params.ThresholdPriority != nil {
+		priority = *params.ThresholdPriority
+	} else {
+		priority, err = GetPriorityFromPriorityClass(ctx, client, params.ThresholdPriorityClassName)
+		if err != nil {
+			return
+		}
+	}
+	if priority > SystemCriticalPriority {
+		return 0, fmt.Errorf("Priority threshold can't be greater than %d", SystemCriticalPriority)
+	}
+	return
+}
```
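Illustrative wiring of the new helper into an eviction filter, mirroring how the strategies in this change use it; `ctx`, `client`, and `podEvictor` are assumed to already be in scope, and the threshold value is made up.

```go
threshold := int32(100000)
params := &api.StrategyParameters{ThresholdPriority: &threshold}
priority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
if err != nil {
	klog.ErrorS(err, "Invalid threshold priority")
	return
}
// Pods at or above this priority are treated as non-evictable by the filter.
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(priority))
_ = evictable
```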
@@ -19,16 +19,23 @@ package e2e
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"math"
|
"math"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
|
schedulingv1 "k8s.io/api/scheduling/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
"k8s.io/client-go/informers"
|
"k8s.io/client-go/informers"
|
||||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/klog"
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
|
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
|
||||||
@@ -41,7 +48,7 @@ import (
|
|||||||
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
|
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
|
||||||
)
|
)
|
||||||
|
|
||||||
func MakePodSpec() v1.PodSpec {
|
func MakePodSpec(priorityClassName string) v1.PodSpec {
|
||||||
return v1.PodSpec{
|
return v1.PodSpec{
|
||||||
Containers: []v1.Container{{
|
Containers: []v1.Container{{
|
||||||
Name: "pause",
|
Name: "pause",
|
||||||
@@ -59,12 +66,12 @@ func MakePodSpec() v1.PodSpec {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
|
PriorityClassName: priorityClassName,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// RcByNameContainer returns a ReplicationControoler with specified name and container
|
// RcByNameContainer returns a ReplicationControoler with specified name and container
|
||||||
func RcByNameContainer(name string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
|
func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64, priorityClassName string) *v1.ReplicationController {
|
||||||
|
|
||||||
zeroGracePeriod := int64(0)
|
zeroGracePeriod := int64(0)
|
||||||
|
|
||||||
// Add "name": name to the labels, overwriting if it exists.
|
// Add "name": name to the labels, overwriting if it exists.
|
||||||
@@ -78,7 +85,8 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr
|
|||||||
APIVersion: "v1",
|
APIVersion: "v1",
|
||||||
},
|
},
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: name,
|
Name: name,
|
||||||
|
Namespace: namespace,
|
||||||
},
|
},
|
||||||
Spec: v1.ReplicationControllerSpec{
|
Spec: v1.ReplicationControllerSpec{
|
||||||
Replicas: func(i int32) *int32 { return &i }(replicas),
|
Replicas: func(i int32) *int32 { return &i }(replicas),
|
||||||
@@ -89,86 +97,450 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Labels: labels,
|
Labels: labels,
|
||||||
},
|
},
|
||||||
Spec: MakePodSpec(),
|
Spec: MakePodSpec(priorityClassName),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||

 // startEndToEndForLowNodeUtilization tests the lownode utilization strategy.
-func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer) {
-	var thresholds = make(deschedulerapi.ResourceThresholds)
-	var targetThresholds = make(deschedulerapi.ResourceThresholds)
-	thresholds[v1.ResourceMemory] = 20
-	thresholds[v1.ResourcePods] = 20
-	thresholds[v1.ResourceCPU] = 85
-	targetThresholds[v1.ResourceMemory] = 20
-	targetThresholds[v1.ResourcePods] = 20
-	targetThresholds[v1.ResourceCPU] = 90
+func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer, podEvictor *evictions.PodEvictor) {
+	// Run descheduler.
+	nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", nil)
+	if err != nil {
+		klog.Fatalf("%v", err)
+	}
+
+	strategies.LowNodeUtilization(
+		ctx,
+		clientset,
+		deschedulerapi.DeschedulerStrategy{
+			Enabled: true,
+			Params: &deschedulerapi.StrategyParameters{
+				NodeResourceUtilizationThresholds: &deschedulerapi.NodeResourceUtilizationThresholds{
+					Thresholds: deschedulerapi.ResourceThresholds{
+						v1.ResourceMemory: 20,
+						v1.ResourcePods: 20,
+						v1.ResourceCPU: 85,
+					},
+					TargetThresholds: deschedulerapi.ResourceThresholds{
+						v1.ResourceMemory: 20,
+						v1.ResourcePods: 20,
+						v1.ResourceCPU: 90,
+					},
+				},
+			},
+		},
+		nodes,
+		podEvictor,
+	)
+
+	time.Sleep(10 * time.Second)
+}
+
+func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, chan struct{}) {
+	clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
+	if err != nil {
+		t.Errorf("Error during client creation with %v", err)
+	}
+
+	stopChannel := make(chan struct{})
+
+	sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
+	sharedInformerFactory.Start(stopChannel)
+	sharedInformerFactory.WaitForCacheSync(stopChannel)
+
+	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
+
+	return clientSet, nodeInformer, stopChannel
+}
+
+func TestLowNodeUtilization(t *testing.T) {
+	ctx := context.Background()
+
+	clientSet, nodeInformer, stopCh := initializeClient(t)
+	defer close(stopCh)
+
+	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+	if err != nil {
+		t.Errorf("Error listing node with %v", err)
+	}
+
+	var nodes []*v1.Node
+	for i := range nodeList.Items {
+		node := nodeList.Items[i]
+		nodes = append(nodes, &node)
+	}
+
+	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
+	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Unable to create ns %v", testNamespace.Name)
+	}
+	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
+
+	rc := RcByNameContainer("test-rc-node-utilization", testNamespace.Name, int32(15), map[string]string{"test": "node-utilization"}, nil, "")
+	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
+		t.Errorf("Error creating deployment %v", err)
+	}
+
+	evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
+	deleteRC(ctx, t, clientSet, rc)
+}
+
+func runPodLifetimeStrategy(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer, namespaces *deschedulerapi.Namespaces, priorityClass string, priority *int32) {
 	// Run descheduler.
 	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
 	if err != nil || len(evictionPolicyGroupVersion) == 0 {
 		klog.Fatalf("%v", err)
 	}
-	stopChannel := make(chan struct{})
-	nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", stopChannel)
+	nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", nil)
 	if err != nil {
 		klog.Fatalf("%v", err)
 	}

-	lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{
-		Enabled: true,
-		Params: deschedulerapi.StrategyParameters{
-			NodeResourceUtilizationThresholds: &deschedulerapi.NodeResourceUtilizationThresholds{
-				Thresholds: thresholds,
-				TargetThresholds: targetThresholds,
+	maxPodLifeTimeSeconds := uint(1)
+	strategies.PodLifeTime(
+		ctx,
+		clientset,
+		deschedulerapi.DeschedulerStrategy{
+			Enabled: true,
+			Params: &deschedulerapi.StrategyParameters{
+				PodLifeTime: &deschedulerapi.PodLifeTime{MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds},
+				Namespaces: namespaces,
+				ThresholdPriority: priority,
+				ThresholdPriorityClassName: priorityClass,
 			},
 		},
-	}
-
-	podEvictor := evictions.NewPodEvictor(
-		clientset,
-		evictionPolicyGroupVersion,
-		false,
-		0,
 		nodes,
+		evictions.NewPodEvictor(
+			clientset,
+			evictionPolicyGroupVersion,
+			false,
+			0,
+			nodes,
+			false,
+		),
 	)

-	strategies.LowNodeUtilization(ctx, clientset, lowNodeUtilizationStrategy, nodes, false, podEvictor)
-	time.Sleep(10 * time.Second)
 }

-func TestE2E(t *testing.T) {
-	// If we have reached here, it means cluster would have been already setup and the kubeconfig file should
-	// be in /tmp directory as admin.conf.
-	ctx := context.Background()
-	clientSet, err := client.CreateClient("/tmp/admin.conf")
-	if err != nil {
-		t.Errorf("Error during client creation with %v", err)
+func getPodNames(pods []v1.Pod) []string {
+	names := []string{}
+	for _, pod := range pods {
+		names = append(names, pod.Name)
 	}
+	return names
+}
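A small aside, not in the diff: the tests below typically combine the two helpers just added to decide whether the originally listed pods are gone. A hedged sketch of that check, with a hypothetical helper name:

// Sketch only: true once none of the originally observed pods remain in the current list.
func allInitialPodsGone(initialPodNames []string, current []v1.Pod) bool {
	return len(intersectStrings(initialPodNames, getPodNames(current))) == 0
}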

+func intersectStrings(lista, listb []string) []string {
+	commonNames := []string{}
+
+	for _, stra := range lista {
+		for _, strb := range listb {
+			if stra == strb {
+				commonNames = append(commonNames, stra)
+				break
+			}
+		}
+	}
+
+	return commonNames
+}
+
+// TODO(jchaloup): add testcases for two included/excluded namespaces
+
+func TestNamespaceConstraintsInclude(t *testing.T) {
+	ctx := context.Background()
+
+	clientSet, nodeInformer, stopCh := initializeClient(t)
+	defer close(stopCh)
+
+	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
+	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Unable to create ns %v", testNamespace.Name)
+	}
+	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
+
+	rc := RcByNameContainer("test-rc-podlifetime", testNamespace.Name, 5, map[string]string{"test": "podlifetime-include"}, nil, "")
+	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
+		t.Errorf("Error creating deployment %v", err)
+	}
+	defer deleteRC(ctx, t, clientSet, rc)
+
+	// wait for a while so all the pods are at least few seconds older
+	time.Sleep(5 * time.Second)
+
+	// it's assumed all new pods are named differently from currently running -> no name collision
+	podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
+	if err != nil {
+		t.Fatalf("Unable to list pods: %v", err)
+	}
+
+	if len(podList.Items) != 5 {
+		t.Fatalf("Expected 5 replicas, got %v instead", len(podList.Items))
+	}
+
+	initialPodNames := getPodNames(podList.Items)
+	sort.Strings(initialPodNames)
+	t.Logf("Existing pods: %v", initialPodNames)
+
+	t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace)
+	runPodLifetimeStrategy(ctx, clientSet, nodeInformer, &deschedulerapi.Namespaces{
+		Include: []string{rc.Namespace},
+	}, "", nil)
+
+	// All pods are supposed to be deleted, wait until all the old pods are deleted
+	if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
+		podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
+		if err != nil {
+			return false, nil
+		}
+
+		includePodNames := getPodNames(podList.Items)
+		// validate all pod were deleted
+		if len(intersectStrings(initialPodNames, includePodNames)) > 0 {
+			t.Logf("Waiting until %v pods get deleted", intersectStrings(initialPodNames, includePodNames))
+			// check if there's at least one pod not in Terminating state
+			for _, pod := range podList.Items {
+				// In case podList contains newly created pods
+				if len(intersectStrings(initialPodNames, []string{pod.Name})) == 0 {
+					continue
+				}
+				if pod.DeletionTimestamp == nil {
+					t.Logf("Pod %v not in terminating state", pod.Name)
+					return false, nil
+				}
+			}
+			t.Logf("All %v pods are terminating", intersectStrings(initialPodNames, includePodNames))
+		}
+
+		return true, nil
+	}); err != nil {
+		t.Fatalf("Error waiting for pods to be deleted: %v", err)
+	}
+}
+
+func TestNamespaceConstraintsExclude(t *testing.T) {
+	ctx := context.Background()
+
+	clientSet, nodeInformer, stopCh := initializeClient(t)
+	defer close(stopCh)
+
+	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
+	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Unable to create ns %v", testNamespace.Name)
+	}
+	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
+
+	rc := RcByNameContainer("test-rc-podlifetime", testNamespace.Name, 5, map[string]string{"test": "podlifetime-exclude"}, nil, "")
+	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
+		t.Errorf("Error creating deployment %v", err)
+	}
+	defer deleteRC(ctx, t, clientSet, rc)
+
+	// wait for a while so all the pods are at least few seconds older
+	time.Sleep(5 * time.Second)
+
+	// it's assumed all new pods are named differently from currently running -> no name collision
+	podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
+	if err != nil {
+		t.Fatalf("Unable to list pods: %v", err)
+	}
+
+	if len(podList.Items) != 5 {
+		t.Fatalf("Expected 5 replicas, got %v instead", len(podList.Items))
+	}
+
+	initialPodNames := getPodNames(podList.Items)
+	sort.Strings(initialPodNames)
+	t.Logf("Existing pods: %v", initialPodNames)
+
+	t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace)
+	runPodLifetimeStrategy(ctx, clientSet, nodeInformer, &deschedulerapi.Namespaces{
+		Exclude: []string{rc.Namespace},
+	}, "", nil)
+
+	t.Logf("Waiting 10s")
+	time.Sleep(10 * time.Second)
+	podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
+	if err != nil {
+		t.Fatalf("Unable to list pods after running strategy: %v", err)
+	}
+
+	excludePodNames := getPodNames(podList.Items)
+	sort.Strings(excludePodNames)
+	t.Logf("Existing pods: %v", excludePodNames)
+
+	// validate no pods were deleted
+	if len(intersectStrings(initialPodNames, excludePodNames)) != 5 {
+		t.Fatalf("None of %v pods are expected to be deleted", initialPodNames)
+	}
+}
+
+func TestThresholdPriority(t *testing.T) {
+	testPriority(t, false)
+}
+
+func TestThresholdPriorityClass(t *testing.T) {
+	testPriority(t, true)
+}
+
+func testPriority(t *testing.T, isPriorityClass bool) {
+	var highPriority = int32(1000)
+	var lowPriority = int32(500)
+	ctx := context.Background()
+
+	clientSet, nodeInformer, stopCh := initializeClient(t)
+	defer close(stopCh)
+
+	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
+	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Unable to create ns %v", testNamespace.Name)
+	}
+	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
+
+	// create two priority classes
+	highPriorityClass := &schedulingv1.PriorityClass{
+		ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name()) + "-highpriority"},
+		Value: highPriority,
+	}
+	if _, err := clientSet.SchedulingV1().PriorityClasses().Create(ctx, highPriorityClass, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Error creating priorityclass %s: %v", highPriorityClass.Name, err)
+	}
+	defer clientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClass.Name, metav1.DeleteOptions{})
+
+	lowPriorityClass := &schedulingv1.PriorityClass{
+		ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name()) + "-lowpriority"},
+		Value: lowPriority,
+	}
+	if _, err := clientSet.SchedulingV1().PriorityClasses().Create(ctx, lowPriorityClass, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Error creating priorityclass %s: %v", lowPriorityClass.Name, err)
+	}
+	defer clientSet.SchedulingV1().PriorityClasses().Delete(ctx, lowPriorityClass.Name, metav1.DeleteOptions{})
+
+	// create two RCs with different priority classes in the same namespace
+	rcHighPriority := RcByNameContainer("test-rc-podlifetime-highpriority", testNamespace.Name, 5,
+		map[string]string{"test": "podlifetime-highpriority"}, nil, highPriorityClass.Name)
+	if _, err := clientSet.CoreV1().ReplicationControllers(rcHighPriority.Namespace).Create(ctx, rcHighPriority, metav1.CreateOptions{}); err != nil {
+		t.Errorf("Error creating rc %s: %v", rcHighPriority.Name, err)
+	}
+	defer deleteRC(ctx, t, clientSet, rcHighPriority)
+
+	rcLowPriority := RcByNameContainer("test-rc-podlifetime-lowpriority", testNamespace.Name, 5,
+		map[string]string{"test": "podlifetime-lowpriority"}, nil, lowPriorityClass.Name)
+	if _, err := clientSet.CoreV1().ReplicationControllers(rcLowPriority.Namespace).Create(ctx, rcLowPriority, metav1.CreateOptions{}); err != nil {
+		t.Errorf("Error creating rc %s: %v", rcLowPriority.Name, err)
+	}
+	defer deleteRC(ctx, t, clientSet, rcLowPriority)
+
+	// wait for a while so all the pods are at least few seconds older
+	time.Sleep(5 * time.Second)
+
+	// it's assumed all new pods are named differently from currently running -> no name collision
+	podListHighPriority, err := clientSet.CoreV1().Pods(rcHighPriority.Namespace).List(
+		ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcHighPriority.Spec.Template.Labels).String()})
+	if err != nil {
+		t.Fatalf("Unable to list pods: %v", err)
+	}
+	podListLowPriority, err := clientSet.CoreV1().Pods(rcLowPriority.Namespace).List(
+		ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcLowPriority.Spec.Template.Labels).String()})
+	if err != nil {
+		t.Fatalf("Unable to list pods: %v", err)
+	}
+
+	if len(podListHighPriority.Items)+len(podListLowPriority.Items) != 10 {
+		t.Fatalf("Expected 10 replicas, got %v instead", len(podListHighPriority.Items)+len(podListLowPriority.Items))
+	}
+
+	expectReservePodNames := getPodNames(podListHighPriority.Items)
+	expectEvictPodNames := getPodNames(podListLowPriority.Items)
+	sort.Strings(expectReservePodNames)
+	sort.Strings(expectEvictPodNames)
+	t.Logf("Pods not expect to be evicted: %v, pods expect to be evicted: %v", expectReservePodNames, expectEvictPodNames)
+
+	if isPriorityClass {
+		t.Logf("set the strategy to delete pods with priority lower than priority class %s", highPriorityClass.Name)
+		runPodLifetimeStrategy(ctx, clientSet, nodeInformer, nil, highPriorityClass.Name, nil)
+	} else {
+		t.Logf("set the strategy to delete pods with priority lower than %d", highPriority)
+		runPodLifetimeStrategy(ctx, clientSet, nodeInformer, nil, "", &highPriority)
+	}
+
+	t.Logf("Waiting 10s")
+	time.Sleep(10 * time.Second)
+	// check if all pods with high priority class are not evicted
+	podListHighPriority, err = clientSet.CoreV1().Pods(rcHighPriority.Namespace).List(
+		ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcHighPriority.Spec.Template.Labels).String()})
+	if err != nil {
+		t.Fatalf("Unable to list pods after running strategy: %v", err)
+	}
+
+	excludePodNames := getPodNames(podListHighPriority.Items)
+	sort.Strings(excludePodNames)
+	t.Logf("Existing high priority pods: %v", excludePodNames)
+
+	// validate no pods were deleted
+	if len(intersectStrings(expectReservePodNames, excludePodNames)) != 5 {
+		t.Fatalf("None of %v high priority pods are expected to be deleted", expectReservePodNames)
+	}
+
+	//check if all pods with low priority class are evicted
+	if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
+		podListLowPriority, err := clientSet.CoreV1().Pods(rcLowPriority.Namespace).List(
+			ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcLowPriority.Spec.Template.Labels).String()})
+		if err != nil {
+			return false, nil
+		}
+
+		includePodNames := getPodNames(podListLowPriority.Items)
+		// validate all pod were deleted
+		if len(intersectStrings(expectEvictPodNames, includePodNames)) > 0 {
+			t.Logf("Waiting until %v low priority pods get deleted", intersectStrings(expectEvictPodNames, includePodNames))
+			// check if there's at least one pod not in Terminating state
+			for _, pod := range podListLowPriority.Items {
+				// In case podList contains newly created pods
+				if len(intersectStrings(expectEvictPodNames, []string{pod.Name})) == 0 {
+					continue
+				}
+				if pod.DeletionTimestamp == nil {
+					t.Logf("Pod %v not in terminating state", pod.Name)
+					return false, nil
+				}
+			}
+			t.Logf("All %v pods are terminating", intersectStrings(expectEvictPodNames, includePodNames))
+		}
+
+		return true, nil
+	}); err != nil {
+		t.Fatalf("Error waiting for pods to be deleted: %v", err)
+	}
+}
+
+func TestEvictAnnotation(t *testing.T) {
+	ctx := context.Background()
+
+	clientSet, nodeInformer, stopCh := initializeClient(t)
+	defer close(stopCh)

 	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		t.Errorf("Error listing node with %v", err)
 	}
-	sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
-	nodeInformer := sharedInformerFactory.Core().V1().Nodes()

-	stopChannel := make(chan struct{}, 0)
-	sharedInformerFactory.Start(stopChannel)
-	sharedInformerFactory.WaitForCacheSync(stopChannel)
-	defer close(stopChannel)
+	var nodes []*v1.Node
+	for i := range nodeList.Items {
+		node := nodeList.Items[i]
+		nodes = append(nodes, &node)

-	// Assumption: We would have 3 node cluster by now. Kubeadm brings all the master components onto master node.
-	// So, the last node would have least utilization.
-	rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
-	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{})
-	if err != nil {
-		t.Errorf("Error creating deployment %v", err)
 	}
-	evictPods(ctx, t, clientSet, nodeInformer, nodeList, rc)

+	testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
+	if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Unable to create ns %v", testNamespace.Name)
+	}
+	defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
+
+	rc := RcByNameContainer("test-rc-evict-annotation", testNamespace.Name, int32(15), map[string]string{"test": "annotation"}, nil, "")
 	rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
-	rc.Spec.Replicas = func(i int32) *int32 { return &i }(15)
 	rc.Spec.Template.Spec.Volumes = []v1.Volume{
 		{
 			Name: "sample",
@@ -178,22 +550,27 @@ func TestE2E(t *testing.T) {
 		},
 		},
 	}
-	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{})
-	if err != nil {
+	if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
 		t.Errorf("Error creating deployment %v", err)
 	}
-	evictPods(ctx, t, clientSet, nodeInformer, nodeList, rc)
+	evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
+	deleteRC(ctx, t, clientSet, rc)
 }

 func TestDeschedulingInterval(t *testing.T) {
 	ctx := context.Background()
-	clientSet, err := client.CreateClient("/tmp/admin.conf")
+	clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
 	if err != nil {
 		t.Errorf("Error during client creation with %v", err)
 	}

 	// By default, the DeschedulingInterval param should be set to 0, meaning Descheduler only runs once then exits
-	s := options.NewDeschedulerServer()
+	s, err := options.NewDeschedulerServer()
+	if err != nil {
+		t.Fatalf("Unable to initialize server: %v", err)
+	}
 	s.Client = clientSet

 	deschedulerPolicy := &api.DeschedulerPolicy{}
@@ -220,46 +597,17 @@ func TestDeschedulingInterval(t *testing.T) {
 	}
 }

-func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList *v1.NodeList, rc *v1.ReplicationController) {
-	var leastLoadedNode v1.Node
-	podsBefore := math.MaxInt16
-	for i := range nodeList.Items {
-		// Skip the Master Node
-		if _, exist := nodeList.Items[i].Labels["node-role.kubernetes.io/master"]; exist {
-			continue
-		}
-		// List all the pods on the current Node
-		podsOnANode, err := podutil.ListEvictablePodsOnNode(ctx, clientSet, &nodeList.Items[i], true)
-		if err != nil {
-			t.Errorf("Error listing pods on a node %v", err)
-		}
-		// Update leastLoadedNode if necessary
-		if tmpLoads := len(podsOnANode); tmpLoads < podsBefore {
-			leastLoadedNode = nodeList.Items[i]
-			podsBefore = tmpLoads
-		}
-	}
-	t.Log("Eviction of pods starting")
-	startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer)
-	podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(ctx, clientSet, &leastLoadedNode, true)
-	if err != nil {
-		t.Errorf("Error listing pods on a node %v", err)
-	}
-	podsAfter := len(podsOnleastUtilizedNode)
-	if podsBefore > podsAfter {
-		t.Fatalf("We should have see more pods on this node as per kubeadm's way of installing %v, %v", podsBefore, podsAfter)
-	}
+func deleteRC(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {

 	//set number of replicas to 0
-	rc.Spec.Replicas = func(i int32) *int32 { return &i }(0)
-	_, err = clientSet.CoreV1().ReplicationControllers("default").Update(ctx, rc, metav1.UpdateOptions{})
-	if err != nil {
-		t.Errorf("Error updating replica controller %v", err)
+	rcdeepcopy := rc.DeepCopy()
+	rcdeepcopy.Spec.Replicas = func(i int32) *int32 { return &i }(0)
+	if _, err := clientSet.CoreV1().ReplicationControllers(rcdeepcopy.Namespace).Update(ctx, rcdeepcopy, metav1.UpdateOptions{}); err != nil {
+		t.Fatalf("Error updating replica controller %v", err)
 	}
 	allPodsDeleted := false
 	//wait 30 seconds until all pods are deleted
 	for i := 0; i < 6; i++ {
-		scale, _ := clientSet.CoreV1().ReplicationControllers("default").GetScale(ctx, rc.Name, metav1.GetOptions{})
+		scale, _ := clientSet.CoreV1().ReplicationControllers(rc.Namespace).GetScale(ctx, rc.Name, metav1.GetOptions{})
 		if scale.Spec.Replicas == 0 {
 			allPodsDeleted = true
 			break
@@ -271,11 +619,60 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
 		t.Errorf("Deleting of rc pods took too long")
 	}

-	err = clientSet.CoreV1().ReplicationControllers("default").Delete(ctx, rc.Name, metav1.DeleteOptions{})
-	if err != nil {
-		t.Errorf("Error deleting rc %v", err)
+	if err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Delete(ctx, rc.Name, metav1.DeleteOptions{}); err != nil {
+		t.Fatalf("Error deleting rc %v", err)
 	}

-	//wait until rc is deleted
-	time.Sleep(5 * time.Second)
+	if err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
+		_, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Get(ctx, rc.Name, metav1.GetOptions{})
+		if err != nil && strings.Contains(err.Error(), "not found") {
+			return true, nil
+		}
+		return false, nil
+	}); err != nil {
+		t.Fatalf("Error deleting rc %v", err)
+	}
+}
+
+func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList []*v1.Node, rc *v1.ReplicationController) {
+	var leastLoadedNode *v1.Node
+	podsBefore := math.MaxInt16
+	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
+	if err != nil || len(evictionPolicyGroupVersion) == 0 {
+		klog.Fatalf("%v", err)
+	}
+	podEvictor := evictions.NewPodEvictor(
+		clientSet,
+		evictionPolicyGroupVersion,
+		false,
+		0,
+		nodeList,
+		true,
+	)
+	for _, node := range nodeList {
+		// Skip the Master Node
+		if _, exist := node.Labels["node-role.kubernetes.io/master"]; exist {
+			continue
+		}
+		// List all the pods on the current Node
+		podsOnANode, err := podutil.ListPodsOnANode(ctx, clientSet, node, podutil.WithFilter(podEvictor.Evictable().IsEvictable))
+		if err != nil {
+			t.Errorf("Error listing pods on a node %v", err)
+		}
+		// Update leastLoadedNode if necessary
+		if tmpLoads := len(podsOnANode); tmpLoads < podsBefore {
+			leastLoadedNode = node
+			podsBefore = tmpLoads
+		}
+	}
+	t.Log("Eviction of pods starting")
+	startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer, podEvictor)
+	podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, leastLoadedNode, podutil.WithFilter(podEvictor.Evictable().IsEvictable))
+	if err != nil {
+		t.Errorf("Error listing pods on a node %v", err)
+	}
+	podsAfter := len(podsOnleastUtilizedNode)
+	if podsBefore > podsAfter {
+		t.Fatalf("We should have see more pods on this node as per kubeadm's way of installing %v, %v", podsBefore, podsAfter)
+	}
 }
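Condensed for reference, and not part of the diff: the new tests all follow the same wiring, build one PodEvictor and hand it to a strategy. The sketch below reuses the NewPodEvictor argument list from the hunks above; the helper name and error message are hypothetical, the sketch assumes the package's imports plus fmt, and the meaning of the boolean and integer arguments is only inferred from those call sites.

// Sketch only: shared eviction plumbing as used by the e2e tests above.
func newTestPodEvictor(clientSet clientset.Interface, nodes []*v1.Node) (*evictions.PodEvictor, error) {
	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
	if err != nil {
		return nil, err
	}
	if len(evictionPolicyGroupVersion) == 0 {
		return nil, fmt.Errorf("eviction API is not supported by this cluster")
	}
	return evictions.NewPodEvictor(
		clientSet,
		evictionPolicyGroupVersion,
		false, // third argument, as at the call sites above
		0,     // fourth argument, as at the call sites above
		nodes,
		true, // sixth argument; the evictPods helper above passes true here
	), nil
}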
@@ -15,5 +15,22 @@
 #!/bin/bash

 # This just run e2e tests.
+if [ -n "$KIND_E2E" ]; then
+    K8S_VERSION=${KUBERNETES_VERSION:-v1.18.2}
+    curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
+    wget https://github.com/kubernetes-sigs/kind/releases/download/v0.9.0/kind-linux-amd64
+    chmod +x kind-linux-amd64
+    mv kind-linux-amd64 kind
+    export PATH=$PATH:$PWD
+    kind create cluster --image kindest/node:${K8S_VERSION} --config=./hack/kind_config.yaml
+    export KUBECONFIG="$(kind get kubeconfig-path)"
+    docker pull kubernetes/pause
+    kind load docker-image kubernetes/pause
+    kind get kubeconfig > /tmp/admin.conf
+    export KUBECONFIG="/tmp/admin.conf"
+    mkdir -p ~/gopath/src/sigs.k8s.io/
+    mv ~/gopath/src/github.com/kubernetes-sigs/descheduler ~/gopath/src/sigs.k8s.io/.
+fi

 PRJ_PREFIX="sigs.k8s.io/descheduler"
 go test ${PRJ_PREFIX}/test/e2e/ -v
@@ -116,3 +116,48 @@ func BuildTestNode(name string, millicpu int64, mem int64, pods int64, apply fun
 	}
 	return node
 }
+
+// MakeBestEffortPod makes the given pod a BestEffort pod
+func MakeBestEffortPod(pod *v1.Pod) {
+	pod.Spec.Containers[0].Resources.Requests = nil
+	pod.Spec.Containers[0].Resources.Requests = nil
+	pod.Spec.Containers[0].Resources.Limits = nil
+	pod.Spec.Containers[0].Resources.Limits = nil
+}
+
+// MakeBurstablePod makes the given pod a Burstable pod
+func MakeBurstablePod(pod *v1.Pod) {
+	pod.Spec.Containers[0].Resources.Limits = nil
+	pod.Spec.Containers[0].Resources.Limits = nil
+}
+
+// MakeGuaranteedPod makes the given pod an Guaranteed pod
+func MakeGuaranteedPod(pod *v1.Pod) {
+	pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
+	pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]
+}
+
+// SetRSOwnerRef sets the given pod's owner to ReplicaSet
+func SetRSOwnerRef(pod *v1.Pod) {
+	pod.ObjectMeta.OwnerReferences = GetReplicaSetOwnerRefList()
+}
+
+// SetDSOwnerRef sets the given pod's owner to DaemonSet
+func SetDSOwnerRef(pod *v1.Pod) {
+	pod.ObjectMeta.OwnerReferences = GetDaemonSetOwnerRefList()
+}
+
+// SetNormalOwnerRef sets the given pod's owner to Pod
+func SetNormalOwnerRef(pod *v1.Pod) {
+	pod.ObjectMeta.OwnerReferences = GetNormalPodOwnerRefList()
+}
+
+// SetPodPriority sets the given pod's priority
+func SetPodPriority(pod *v1.Pod, priority int32) {
+	pod.Spec.Priority = &priority
+}
+
+// SetNodeUnschedulable sets the given node unschedulable
+func SetNodeUnschedulable(node *v1.Node) {
+	node.Spec.Unschedulable = true
+}
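A usage sketch, not part of the diff: how these fixture helpers are typically chained in unit tests. BuildTestPod is assumed to exist in the same package with a signature analogous to BuildTestNode; the names and numbers are placeholders.

// Sketch only: an unschedulable node plus a BestEffort, ReplicaSet-owned, low-priority pod.
func buildFixtures() (*v1.Node, *v1.Pod) {
	node := BuildTestNode("node-a", 2000, 3000, 10, nil)
	SetNodeUnschedulable(node)

	pod := BuildTestPod("pod-a", 100, 0, node.Name, nil) // assumed helper, analogous to BuildTestNode
	MakeBestEffortPod(pod)
	SetRSOwnerRef(pod)
	SetPodPriority(pod, 500)
	return node, pod
}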
65  vendor/cloud.google.com/go/compute/metadata/metadata.go  (generated, vendored)
Vendored dependency update: the package-level subscribeClient is folded into defaultClient, Subscribe now uses the default client, an Email(serviceAccount) accessor is added, NewClient(nil) returns the default client, getETag propagates the http.NewRequest error, and InstanceName is read from "instance/name" instead of being derived from Hostname.
32  vendor/github.com/Azure/go-autorest/.gitignore  (generated, vendored, new file)
New vendored file: the standard Go .gitignore plus go-autorest-specific entries (vendor/, autorest/azure/example/example).
1004  vendor/github.com/Azure/go-autorest/CHANGELOG.md  (generated, vendored, new file)
File diff suppressed because it is too large
23  vendor/github.com/Azure/go-autorest/GNUmakefile  (generated, vendored, new file)
New vendored file: a small makefile with build, test, vet, and fmt targets.
324  vendor/github.com/Azure/go-autorest/Gopkg.lock  (generated, vendored, new file)
New vendored file: the dep-generated lock file pinning go-autorest's transitive dependencies (opencensus, jwt-go, grpc, grpc-gateway, testify, and several golang.org/x packages).
59  vendor/github.com/Azure/go-autorest/Gopkg.toml  (generated, vendored, new file)
New vendored file: dep constraints matching the lock file above (ocagent 0.6.0, jwt-go 3.2.0, utfbom 1.1.0, go-homedir 1.1.0, testify 1.3.0, opencensus 0.22.0, golang.org/x/crypto master).
Vendored license update: the Rackspace, Inc. copyright header and its short Apache notice are dropped; the file now carries the full Apache License 2.0 text followed by the standard notice for "Copyright 2015 Microsoft Corporation".
vendor/github.com/Azure/go-autorest/README.md (generated, vendored, new file, 165 lines)
@@ -0,0 +1,165 @@
+# go-autorest
+
+[](https://godoc.org/github.com/Azure/go-autorest/autorest)
+[](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master)
+[](https://goreportcard.com/report/Azure/go-autorest)
+
+Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages.
+
+An authentication client tested with Azure Active Directory (AAD) is also
+provided in this repo in the package
+`github.com/Azure/go-autorest/autorest/adal`.  Despite its name, this package
+is maintained only as part of the Azure Go SDK and is not related to other
+"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD).
+
+## Overview
+
+Package go-autorest implements an HTTP request pipeline suitable for use across
+multiple goroutines and provides the shared routines used by packages generated
+by [Autorest](https://github.com/Azure/autorest.go).
+
+The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
+and Responding. A typical pattern is:
+
+```go
+  req, err := Prepare(&http.Request{},
+    token.WithAuthorization())
+
+  resp, err := Send(req,
+    WithLogging(logger),
+    DoErrorIfStatusCode(http.StatusInternalServerError),
+    DoCloseIfError(),
+    DoRetryForAttempts(5, time.Second))
+
+  err = Respond(resp,
+    ByDiscardingBody(),
+    ByClosing())
+```
+
+Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
+and then pass the data along, pass the data first and then modify the result, or wrap themselves
+around passing the data (such as a logger might do). Decorators run in the order provided. For
+example, the following:
+
+```go
+  req, err := Prepare(&http.Request{},
+    WithBaseURL("https://microsoft.com/"),
+    WithPath("a"),
+    WithPath("b"),
+    WithPath("c"))
+```
+
+will set the URL to:
+
+```
+  https://microsoft.com/a/b/c
+```
+
+Preparers and Responders may be shared and re-used (assuming the underlying decorators support
+sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
+shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
+all bound together by means of input / output channels.
+
+Decorators hold their passed state within a closure (such as the path components in the example
+above). Be careful to share Preparers and Responders only in a context where such held state
+applies. For example, it may not make sense to share a Preparer that applies a query string from a
+fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
+struct (e.g., `ByUnmarshallingJson`) is likely incorrect.
+
+Errors raised by autorest objects and methods will conform to the `autorest.Error` interface.
+
+See the included examples for more detail. For details on the suggested use of this package by
+generated clients, see the Client described below.
+
+## Helpers
+
+### Handling Swagger Dates
+
+The Swagger specification (https://swagger.io) that drives AutoRest
+(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
+github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct
+parsing and formatting.
+
+### Handling Empty Values
+
+In JSON, missing values have different semantics than empty values. This is especially true for
+services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains
+only those values to modify. Missing values are to be left unchanged. Developers, then, require a
+means to both specify an empty value and to leave the value out of the submitted JSON.
+
+The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits
+empty values from the rendered JSON. Since Go defines default values for all base types (such as ""
+for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package
+treats default values as meaning empty, omitting them from the rendered JSON. This means that, using
+the Go base types encoded through the default JSON package, it is not possible to create JSON to
+clear a value at the server.
+
+The workaround within the Go community is to use pointers to base types in lieu of base types within
+structures that map to JSON. For example, instead of a value of type `string`, the workaround uses
+`*string`. While this enables distinguishing empty values from those to be unchanged, creating
+pointers to a base type (notably constant, in-line values) requires additional variables. This, for
+example,
+
+```go
+  s := struct {
+    S *string
+  }{ S: &"foo" }
+```
+fails, while, this
+
+```go
+  v := "foo"
+  s := struct {
+    S *string
+  }{ S: &v }
+```
+succeeds.
+
+To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
+Go base types which have Swagger analogs. It also provides a helper that converts between
+`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
+associated with a key should be cleared. With the helpers, the previous example becomes
+
+```go
+  s := struct {
+    S *string
+  }{ S: to.StringPtr("foo") }
+```
+
+## Install
+
+```bash
+go get github.com/Azure/go-autorest/autorest
+go get github.com/Azure/go-autorest/autorest/azure
+go get github.com/Azure/go-autorest/autorest/date
+go get github.com/Azure/go-autorest/autorest/to
+```
+
+### Using with Go Modules
+In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules.
+
+- autorest/adal
+- autorest/azure/auth
+- autorest/azure/cli
+- autorest/date
+- autorest/mocks
+- autorest/to
+- autorest/validation
+- autorest
+- logger
+- tracing
+
+Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules.
+
+## License
+
+See LICENSE file.
+
+-----
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
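The README's "Handling Empty Values" section above is the part most often misread, so here is a small self-contained sketch of the behaviour it describes. It assumes only `encoding/json` and the vendored `autorest/to` helper (`to.StringPtr`) mentioned in the README; the struct and field names are illustrative, not taken from the diff.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
)

// patchBody mirrors a typical PATCH payload: pointer fields plus omitempty
// let a caller distinguish "leave unchanged" (nil) from "set to empty" (&"").
type patchBody struct {
	Name *string `json:"name,omitempty"`
	Tag  *string `json:"tag,omitempty"`
}

func main() {
	// Name is set explicitly to the empty string, Tag is left nil.
	b, err := json.Marshal(patchBody{Name: to.StringPtr("")})
	if err != nil {
		panic(err)
	}
	// Prints {"name":""}: Tag is omitted, so the server leaves it unchanged,
	// while Name is sent as an empty value and can be cleared server-side.
	fmt.Println(string(b))
}
```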
vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go (generated, vendored, 39 changed lines)
@@ -24,6 +24,7 @@ package adal
 */
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
@@ -101,7 +102,14 @@ type deviceToken struct {
 
 // InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
 // that can be used with CheckForUserCompletion or WaitForUserCompletion.
+// Deprecated: use InitiateDeviceAuthWithContext() instead.
 func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+	return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource)
+}
+
+// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
 	v := url.Values{
 		"client_id": []string{clientID},
 		"resource":  []string{resource},
@@ -117,7 +125,7 @@ func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resour
 
 	req.ContentLength = int64(len(s))
 	req.Header.Set(contentType, mimeTypeFormPost)
-	resp, err := sender.Do(req)
+	resp, err := sender.Do(req.WithContext(ctx))
 	if err != nil {
 		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
 	}
@@ -151,7 +159,14 @@ func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resour
 
 // CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
 // to see if the device flow has: been completed, timed out, or otherwise failed
+// Deprecated: use CheckForUserCompletionWithContext() instead.
 func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+	return CheckForUserCompletionWithContext(context.Background(), sender, code)
+}
+
+// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has: been completed, timed out, or otherwise failed
+func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
 	v := url.Values{
 		"client_id": []string{code.ClientID},
 		"code":      []string{*code.DeviceCode},
@@ -169,7 +184,7 @@ func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
 
 	req.ContentLength = int64(len(s))
 	req.Header.Set(contentType, mimeTypeFormPost)
-	resp, err := sender.Do(req)
+	resp, err := sender.Do(req.WithContext(ctx))
 	if err != nil {
 		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
 	}
@@ -207,18 +222,29 @@ func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
 	case "code_expired":
 		return nil, ErrDeviceCodeExpired
 	default:
+		// return a more meaningful error message if available
+		if token.ErrorDescription != nil {
+			return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription)
+		}
 		return nil, ErrDeviceGeneric
 	}
 }
 
 // WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
 // This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+// Deprecated: use WaitForUserCompletionWithContext() instead.
 func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+	return WaitForUserCompletionWithContext(context.Background(), sender, code)
+}
+
+// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error
+// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
 	intervalDuration := time.Duration(*code.Interval) * time.Second
 	waitDuration := intervalDuration
 
 	for {
-		token, err := CheckForUserCompletion(sender, code)
+		token, err := CheckForUserCompletionWithContext(ctx, sender, code)
 
 		if err == nil {
 			return token, nil
@@ -237,6 +263,11 @@ func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
 			return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
 		}
 
-		time.Sleep(waitDuration)
+		select {
+		case <-time.After(waitDuration):
+			// noop
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
 	}
 }
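The devicetoken.go change above deprecates the old entry points in favour of `*WithContext` variants so cancellation reaches both the HTTP calls and the polling loop. Below is a minimal caller sketch under stated assumptions: the tenant, client ID, and resource values are placeholders, and the use of `adal.NewOAuthConfig` and of a plain `*http.Client` as the `Sender` are assumptions about the surrounding adal API, not something shown in this diff.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Hypothetical tenant; adal.NewOAuthConfig is assumed to be available in this package version.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
	if err != nil {
		panic(err)
	}

	// Bound the whole device flow to five minutes; cancellation now propagates
	// into the HTTP requests and the polling loop via the *WithContext variants.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	sender := &http.Client{} // *http.Client satisfies the Sender interface (Do method)

	code, err := adal.InitiateDeviceAuthWithContext(ctx, sender, *cfg, "my-client-id", "https://management.azure.com/")
	if err != nil {
		panic(err)
	}
	fmt.Println(*code.Message) // tells the user where to enter the device code

	token, err := adal.WaitForUserCompletionWithContext(ctx, sender, code)
	if err != nil {
		panic(err) // includes ctx.Err() if the deadline expired mid-poll
	}
	fmt.Println("access token acquired, length:", len(token.AccessToken))
}
```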
vendor/github.com/Azure/go-autorest/autorest/adal/go.mod (generated, vendored, 11 changed lines)
@@ -3,9 +3,10 @@ module github.com/Azure/go-autorest/autorest/adal
 go 1.12
 
 require (
-	github.com/Azure/go-autorest/autorest/date v0.1.0
-	github.com/Azure/go-autorest/autorest/mocks v0.1.0
-	github.com/Azure/go-autorest/tracing v0.5.0
-	github.com/dgrijalva/jwt-go v3.2.0+incompatible
-	golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+	github.com/Azure/go-autorest v14.2.0+incompatible
+	github.com/Azure/go-autorest/autorest/date v0.3.0
+	github.com/Azure/go-autorest/autorest/mocks v0.4.1
+	github.com/Azure/go-autorest/tracing v0.6.0
+	github.com/form3tech-oss/jwt-go v3.2.2+incompatible
+	golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
 )
vendor/github.com/Azure/go-autorest/autorest/adal/go.sum (generated, vendored, 23 changed lines)
@@ -1,12 +1,19 @@
-github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go (generated, vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
+// +build modhack
+
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
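The comments in go_mod_tidy_hack.go describe the usual work-around for keeping a parent module listed in a submodule's go.mod inside a multi-module repository. A hedged sketch of the same pattern for a hypothetical repository `example.com/repo` with submodule `example.com/repo/sub` (all names are invented for illustration):

```go
// +build modhack

// This file is compiled only when the "modhack" build tag is set, which normal
// builds never do, so the blank import below never reaches a binary. Its sole
// purpose is to make `go mod tidy` keep example.com/repo as a requirement of
// the submodule example.com/repo/sub.
package sub

import _ "example.com/repo"
```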
vendor/github.com/Azure/go-autorest/autorest/adal/persist.go (generated, vendored, 62 changed lines)
@@ -15,11 +15,24 @@ package adal
 // limitations under the License.
 
 import (
+	"crypto/rsa"
+	"crypto/x509"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
+
+	"golang.org/x/crypto/pkcs12"
+)
+
+var (
+	// ErrMissingCertificate is returned when no local certificate is found in the provided PFX data.
+	ErrMissingCertificate = errors.New("adal: certificate missing")
+
+	// ErrMissingPrivateKey is returned when no private key is found in the provided PFX data.
+	ErrMissingPrivateKey = errors.New("adal: private key missing")
 )
 
 // LoadToken restores a Token object from a file located at 'path'.
@@ -71,3 +84,52 @@ func SaveToken(path string, mode os.FileMode, token Token) error {
 	}
 	return nil
 }
+
+// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
+// The PFX data must contain a private key along with a certificate whose public key matches that of the
+// private key or an error is returned.
+// If the private key is not password protected pass the empty string for password.
+func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+	blocks, err := pkcs12.ToPEM(pfxData, password)
+	if err != nil {
+		return nil, nil, err
+	}
+	// first extract the private key
+	var priv *rsa.PrivateKey
+	for _, block := range blocks {
+		if block.Type == "PRIVATE KEY" {
+			priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			break
+		}
+	}
+	if priv == nil {
+		return nil, nil, ErrMissingPrivateKey
+	}
+	// now find the certificate with the matching public key of our private key
+	var cert *x509.Certificate
+	for _, block := range blocks {
+		if block.Type == "CERTIFICATE" {
+			pcert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
+			if !ok {
+				// keep looking
+				continue
+			}
+			if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
+				// found a match
+				cert = pcert
+				break
+			}
+		}
+	}
+	if cert == nil {
+		return nil, nil, ErrMissingCertificate
+	}
+	return cert, priv, nil
+}
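The persist.go hunk above adds `DecodePfxCertificateData`. A short usage sketch follows; the PFX path is a placeholder, the empty password assumes an unprotected key, and everything other than `DecodePfxCertificateData` and its error values comes from the standard library rather than this diff.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Hypothetical path to a PFX bundle containing a certificate and its RSA key.
	pfxData, err := ioutil.ReadFile("service-principal.pfx")
	if err != nil {
		log.Fatal(err)
	}

	// Empty password means the PFX is not password protected.
	cert, key, err := adal.DecodePfxCertificateData(pfxData, "")
	if err != nil {
		// ErrMissingCertificate / ErrMissingPrivateKey are returned when the PFX
		// lacks a matching certificate and key pair, as implemented above.
		log.Fatal(err)
	}

	fmt.Println("certificate subject:", cert.Subject.CommonName)
	fmt.Println("RSA key size (bits):", key.N.BitLen())
}
```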
Some files were not shown because too many files have changed in this diff.