Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 13:29:11 +01:00.

Compare commits: 233 commits (release-1. ... release-1.)
.github/ci/ct.yaml (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
+chart-dirs:
+  - charts
+helm-extra-args: "--timeout=5m"
+check-version-increment: false
+target-branch: master
.github/workflows/helm.yaml (vendored, new file, 69 lines)
@@ -0,0 +1,69 @@
+name: Helm
+
+on:
+  push:
+    branches:
+      - master
+      - release-*
+    paths:
+      - 'charts/**'
+      - '.github/workflows/helm.yaml'
+      - '.github/ci/ct.yaml'
+  pull_request:
+    paths:
+      - 'charts/**'
+      - '.github/workflows/helm.yaml'
+      - '.github/ci/ct.yaml'
+
+jobs:
+  lint-and-test:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Set up Helm
+        uses: azure/setup-helm@v2.1
+        with:
+          version: v3.9.2
+
+      - uses: actions/setup-python@v3.1.2
+        with:
+          python-version: 3.7
+
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '1.19.3'
+
+      - name: Set up chart-testing
+        uses: helm/chart-testing-action@v2.2.1
+        with:
+          version: v3.7.0
+
+      - name: Run chart-testing (list-changed)
+        id: list-changed
+        run: |
+          changed=$(ct list-changed --config=.github/ci/ct.yaml)
+          if [[ -n "$changed" ]]; then
+            echo "::set-output name=changed::true"
+          fi
+
+      - name: Run chart-testing (lint)
+        run: ct lint --config=.github/ci/ct.yaml --validate-maintainers=false
+
+      # Need a multi node cluster so descheduler runs until evictions
+      - name: Create multi node Kind cluster
+        run: make kind-multi-node
+
+      # helm-extra-set-args only available after ct 3.6.0
+      - name: Run chart-testing (install)
+        run: ct install --config=.github/ci/ct.yaml --helm-extra-set-args='--set=kind=Deployment'
+
+      - name: E2E after chart install
+        env:
+          KUBERNETES_VERSION: "v1.26.0"
+          KIND_E2E: true
+          SKIP_INSTALL: true
+        run: make test-e2e
.github/workflows/security.yaml (vendored, new file, 47 lines)
@@ -0,0 +1,47 @@
+name: "Security"
+
+on:
+  push:
+    branches:
+      - main
+      - master
+      - release-*
+  schedule:
+    - cron: '30 1 * * 0'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    strategy:
+      fail-fast: false
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Build image
+        run: |
+          IMAGE_REPO=${HELM_IMAGE_REPO:-descheduler}
+          IMAGE_TAG=${HELM_IMAGE_TAG:-security-test}
+          VERSION=security-test make image
+      - name: Run Trivy vulnerability scanner
+        uses: aquasecurity/trivy-action@master
+        with:
+          image-ref: 'descheduler:security-test'
+          format: 'sarif'
+          exit-code: '0'
+          severity: 'CRITICAL,HIGH'
+          output: 'trivy-results.sarif'
+
+      - name: Upload Trivy scan results to GitHub Security tab
+        uses: github/codeql-action/upload-sarif@v2
+        with:
+          sarif_file: 'trivy-results.sarif'
+          exit-code: '0'
.gitignore (vendored)
@@ -4,4 +4,5 @@ vendordiff.patch
 .idea/
 *.code-workspace
 .vscode/
 kind
+bin/
@@ -5,11 +5,14 @@ linters:
   disable-all: true
   enable:
     - gofmt
+    - gofumpt
     - gosimple
     - gocyclo
     - misspell
     - govet

 linters-settings:
+  gofumpt:
+    extra-rules: true
   goimports:
     local-prefixes: sigs.k8s.io/descheduler
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM golang:1.18.2
+FROM golang:1.19.3

 WORKDIR /go/src/sigs.k8s.io/descheduler
 COPY . .
Makefile
@@ -14,6 +14,8 @@

 .PHONY: test

+export CONTAINER_ENGINE ?= docker
+
 # VERSION is based on a date stamp plus the last commit
 VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags)
 BRANCH?=$(shell git branch --show-current)
@@ -24,9 +26,12 @@ ARCHS = amd64 arm arm64

 LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"

-GOLANGCI_VERSION := v1.46.1
+GOLANGCI_VERSION := v1.49.0
 HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)

+GOFUMPT_VERSION := v0.4.0
+HAS_GOFUMPT := $(shell command -v gofumpt 2> /dev/null)
+
 # REGISTRY is the container registry to push
 # into. The default is to push to the staging
 # registry, not production.
@@ -60,36 +65,36 @@ build.arm64:
 	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler

 dev-image: build
-	docker build -f Dockerfile.dev -t $(IMAGE) .
+	$(CONTAINER_ENGINE) build -f Dockerfile.dev -t $(IMAGE) .

 image:
-	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .
+	$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .

 image.amd64:
-	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .
+	$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .

 image.arm:
-	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm" -t $(IMAGE)-arm .
+	$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm" -t $(IMAGE)-arm .

 image.arm64:
-	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .
+	$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .

 push: image
 	gcloud auth configure-docker
-	docker tag $(IMAGE) $(IMAGE_GCLOUD)
-	docker push $(IMAGE_GCLOUD)
+	$(CONTAINER_ENGINE) tag $(IMAGE) $(IMAGE_GCLOUD)
+	$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)

 push-all: image.amd64 image.arm image.arm64
 	gcloud auth configure-docker
 	for arch in $(ARCHS); do \
-		docker tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
-		docker push $(IMAGE_GCLOUD)-$${arch} ;\
+		$(CONTAINER_ENGINE) tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
+		$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)-$${arch} ;\
 	done
-	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
+	DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
 	for arch in $(ARCHS); do \
-		DOCKER_CLI_EXPERIMENTAL=enabled docker manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
+		DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
 	done
-	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(IMAGE_GCLOUD) ;\
+	DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest push $(IMAGE_GCLOUD) ;\

 clean:
 	rm -rf _output
@@ -135,13 +140,30 @@ ifndef HAS_GOLANGCI
 endif
 	./_output/bin/golangci-lint run

-lint-chart: ensure-helm-install
-	helm lint ./charts/descheduler
-
-test-helm: ensure-helm-install
-	./test/run-helm-tests.sh
+fmt:
+ifndef HAS_GOFUMPT
+	go install mvdan.cc/gofumpt@${GOFUMPT_VERSION}
+endif
+	gofumpt -w -extra .
+
+# helm

 ensure-helm-install:
 ifndef HAS_HELM
 	curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
 endif
+
+lint-chart: ensure-helm-install
+	helm lint ./charts/descheduler
+
+build-helm:
+	helm package ./charts/descheduler --dependency-update --destination ./bin/chart
+
+test-helm: ensure-helm-install
+	./test/run-helm-tests.sh
+
+kind-multi-node:
+	kind create cluster --name kind --config ./hack/kind_config.yaml --wait 2m
+
+ct-helm:
+	./hack/verify-chart.sh
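The `CONTAINER_ENGINE` variable introduced above defaults to `docker`, so existing workflows keep working, while alternative engines can be selected per invocation. A minimal sketch of a CI step that overrides it, assuming Podman is available on the runner (the step name, the Podman choice, and the tag are illustrative, not part of this change):

```yaml
# Hypothetical GitHub Actions step: build the descheduler image with Podman by
# overriding the new CONTAINER_ENGINE Make variable on the command line.
- name: Build image with Podman
  run: make image CONTAINER_ENGINE=podman VERSION=local-test
```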
OWNERS
@@ -2,16 +2,15 @@ approvers:
 - damemi
 - ingvagabund
 - seanmalloy
+- a7i
 reviewers:
-- aveshagarwal
-- k82cn
-- ravisantoshgudimetla
 - damemi
 - seanmalloy
 - ingvagabund
 - lixiang233
 - a7i
 - janeliul
+- knelasevero
 emeritus_approvers:
 - aveshagarwal
 - k82cn
README.md
@@ -1,6 +1,10 @@
 [](https://goreportcard.com/report/sigs.k8s.io/descheduler)
 
+
+<p align="center">
+    <img src="assets/logo/descheduler-stacked-color.png" width="40%" align="center" alt="descheduler">
+</p>
 
 # Descheduler for Kubernetes
 
 Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
@@ -101,21 +105,21 @@ The descheduler helm chart is also listed on the [artifact hub](https://artifact
 ### Install Using Kustomize
 
 You can use kustomize to install descheduler.
-See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/resource/) for detailed instructions.
+See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/cmd/build/) for detailed instructions.
 
 Run As A Job
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.24.1' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.26.1' | kubectl apply -f -
 ```
 
 Run As A CronJob
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.24.1' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.26.1' | kubectl apply -f -
 ```
 
 Run As A Deployment
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.24.1' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.26.1' | kubectl apply -f -
 ```
 
 ## User Guide
@@ -243,6 +247,7 @@ actual usage metrics. Implementing metrics-based descheduling is currently TODO
 |`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
 |`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
 |`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
+|`Namespaces`|(see [namespace filtering](#namespace-filtering))|
 
 **Example:**
 
@@ -315,6 +320,7 @@ actual usage metrics. Implementing metrics-based descheduling is currently TODO
 |`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
 |`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
 |`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
+|`Namespaces`|(see [namespace filtering](#namespace-filtering))|
 
 **Example:**
 
@@ -524,19 +530,24 @@ strategies:
 
 This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
 
-You can also specify `podStatusPhases` to `only` evict pods with specific `StatusPhases`, currently this parameter is limited
-to `Running` and `Pending`.
+You can also specify `states` parameter to **only** evict pods matching the following conditions:
+- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
+- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`
+
+If a value for `states` or `podStatusPhases` is not specified,
+Pods in any state (even `Running`) are considered for eviction.
 
 **Parameters:**
 
-|Name|Type|
-|---|---|
-|`maxPodLifeTimeSeconds`|int|
-|`podStatusPhases`|list(string)|
-|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
-|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
-|`namespaces`|(see [namespace filtering](#namespace-filtering))|
-|`labelSelector`|(see [label filtering](#label-filtering))|
+|Name|Type|Notes|
+|---|---|---|
+|`maxPodLifeTimeSeconds`|int||
+|`podStatusPhases`|list(string)|Deprecated in v0.25+ Use `states` instead|
+|`states`|list(string)|Only supported in v0.25+|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))||
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))||
+|`namespaces`|(see [namespace filtering](#namespace-filtering))||
+|`labelSelector`|(see [label filtering](#label-filtering))||
 
 **Example:**
 
@@ -549,8 +560,9 @@ strategies:
     params:
       podLifeTime:
         maxPodLifeTimeSeconds: 86400
-        podStatusPhases:
+        states:
         - "Pending"
+        - "PodInitializing"
 ```
 
 ### RemoveFailedPods
@@ -607,6 +619,7 @@ The following strategies accept a `namespaces` parameter which allows to specify
 * `RemoveDuplicates`
 * `RemovePodsViolatingTopologySpreadConstraint`
 * `RemoveFailedPods`
+* `LowNodeUtilization` and `HighNodeUtilization` (Only filtered right before eviction)
 
 For example:
 
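Since `LowNodeUtilization` and `HighNodeUtilization` now accept namespace filtering (applied only right before eviction, per the note above), a policy can keep those strategies away from selected namespaces. A minimal sketch, assuming the same `descheduler/v1alpha1` policy format used by the other examples in this README; the threshold numbers and the excluded namespace are illustrative:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50
      # For node-utilization strategies this filter is only applied right
      # before eviction, as noted in the list above.
      namespaces:
        exclude:
          - "kube-system"
```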
@@ -689,7 +702,7 @@ does not exist, descheduler won't create it and will throw an error.
 
 ### Label filtering
 
-The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)
+The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#labelselector-v1-meta)
 to filter pods by their labels:
 
 * `PodLifeTime`
@@ -751,6 +764,7 @@ strategies:
   "LowNodeUtilization":
     enabled: true
     params:
+      nodeFit: true
       nodeResourceUtilizationThresholds:
         thresholds:
           "cpu": 20
@@ -760,7 +774,6 @@ strategies:
           "cpu": 50
           "memory": 50
           "pods": 50
-      nodeFit: true
 ```
 
 Note that node fit filtering references the current pod spec, and not that of it's owner.
@@ -785,6 +798,9 @@ best effort pods are evicted before burstable and guaranteed pods.
 * All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
   annotation is used to override checks which prevent eviction and users can select which pod is evicted.
   Users should know how and if the pod will be recreated.
+  The annotation only affects internal descheduler checks.
+  The anti-disruption protection provided by the [/eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/)
+  subresource is still respected.
 * Pods with a non-nil DeletionTimestamp are not evicted by default.
 
 Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.
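Because the `descheduler.alpha.kubernetes.io/evict` annotation only bypasses the descheduler's internal checks while the `/eviction` subresource protections still apply, it is set directly on the pod that should opt in. A minimal sketch of such a pod; the name, image, and the `"true"` value are illustrative (the descheduler only checks for the annotation's presence):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: opt-in-evict-example
  annotations:
    # Overrides the descheduler's internal evictability checks; PodDisruptionBudgets
    # and the eviction API are still respected.
    descheduler.alpha.kubernetes.io/evict: "true"
spec:
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9
```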
@@ -832,6 +848,8 @@ packages that it is compiled with.
 
 | Descheduler | Supported Kubernetes Version |
 |-------------|------------------------------|
+| v0.26 | v1.26 |
+| v0.25 | v1.25 |
 | v0.24 | v1.24 |
 | v0.23 | v1.23 |
 | v0.22 | v1.22 |
assets/logo/descheduler-stacked-color.png (new executable binary file, 41 KiB; binary content not shown)
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: descheduler
-version: 0.24.1
-appVersion: 0.24.1
+version: 0.26.1
+appVersion: 0.26.1
 description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
 keywords:
 - kubernetes
@@ -43,46 +43,46 @@ The command removes all the Kubernetes components associated with the chart and
 
 The following table lists the configurable parameters of the _descheduler_ chart and their default values.
 
 | Parameter | Description | Default |
-|-------------------------------------|-------------------------------------------------------------------------------------------------------------|--------------------------------------|
+| ----------------------------------- | ------------------------------------------------------------------------------------------------------------- | ----------------------------------------- |
 | `kind` | Use as CronJob or Deployment | `CronJob` |
-| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
+| `image.repository` | Docker repository to use | `registry.k8s.io/descheduler/descheduler` |
 | `image.tag` | Docker tag to use | `v[chart appVersion]` |
 | `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
 | `imagePullSecrets` | Docker repository secrets | `[]` |
 | `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
 | `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
 | `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
 | `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
 | `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
-| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
-| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
+| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
+| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
+| `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
 | `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
 | `replicas` | The replica count for Deployment | `1` |
 | `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
 | `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
 | `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
 | `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
 | `rbac.create` | If `true`, create & use RBAC resources | `true` |
-| `podSecurityPolicy.create` | If `true`, create PodSecurityPolicy | `true` |
 | `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
 | `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
 | `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
 | `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
 | `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
 | `podLabels` | Labels to add to the descheduler Pods | `{}` |
 | `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
 | `service.enabled` | If `true`, create a service for deployment | `false` |
 | `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
 | `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
 | `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
 | `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
 | `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
 | `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
 | `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
 | `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
 | `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
 | `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
 | `suspend` | Set spec.suspend in descheduler cronjob | `false` |
 | `commonLabels` | Labels to apply to all resources | `{}` |
 | `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |
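A hedged example of overriding several of the parameters listed above when running the chart as a Deployment; the file name and the specific values are illustrative, not chart defaults:

```yaml
# values-deployment.yaml (hypothetical override file)
kind: Deployment
replicas: 2
deschedulingInterval: 10m
leaderElection:
  enabled: true
  # Consumed by the ClusterRole template further below when granting access to the
  # leader-election Lease; defaults to "descheduler" when unset.
  resourceName: descheduler-lock
image:
  repository: registry.k8s.io/descheduler/descheduler
```

It could be applied with something like `helm install descheduler ./charts/descheduler -f values-deployment.yaml`.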
@@ -4,4 +4,9 @@ Descheduler installed as a {{ .Values.kind }}.
 {{- if eq .Values.replicas 1.0}}
 WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
 {{- end}}
+{{- if .Values.leaderElection }}
+{{- if and (hasKey .Values.cmdOptions "dry-run") (eq (get .Values.cmdOptions "dry-run") true) }}
+WARNING: You enabled DryRun mode, you can't use Leader Election.
+{{- end}}
+{{- end}}
 {{- end}}
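The new NOTES.txt logic warns when leader election and dry-run are combined. A values fragment that would trigger that warning might look like the following (illustrative only; the two settings should not actually be used together):

```yaml
kind: Deployment
leaderElection:
  enabled: true
cmdOptions:
  # Rendered as the --dry-run flag; together with leaderElection this surfaces
  # the new "You enabled DryRun mode" warning at install time.
  dry-run: true
```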
@@ -6,7 +6,7 @@ metadata:
   labels:
     {{- include "descheduler.labels" . | nindent 4 }}
 rules:
-  - apiGroups: [""]
+  - apiGroups: ["events.k8s.io"]
     resources: ["events"]
     verbs: ["create", "update"]
   - apiGroups: [""]
@@ -30,14 +30,7 @@ rules:
     verbs: ["create", "update"]
   - apiGroups: ["coordination.k8s.io"]
     resources: ["leases"]
-    resourceNames: ["descheduler"]
+    resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
     verbs: ["get", "patch", "delete"]
 {{- end }}
-{{- if .Values.podSecurityPolicy.create }}
-  - apiGroups: ['policy']
-    resources: ['podsecuritypolicies']
-    verbs: ['use']
-    resourceNames:
-      - {{ template "descheduler.fullname" . }}
-{{- end }}
 {{- end -}}
@@ -23,6 +23,9 @@ spec:
   {{- end }}
   jobTemplate:
     spec:
+      {{- if .Values.ttlSecondsAfterFinished }}
+      ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
+      {{- end }}
       template:
         metadata:
           name: {{ template "descheduler.fullname" . }}
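The `ttlSecondsAfterFinished` block rendered above is driven by a new chart value. A minimal values fragment, using the 600-second figure that appears as a commented default in values.yaml further below (the value itself is only an example):

```yaml
# Finished descheduler Jobs created by the CronJob are garbage-collected after ten minutes.
ttlSecondsAfterFinished: 600
```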
@@ -1,39 +0,0 @@
-{{- if .Values.podSecurityPolicy.create -}}
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
-  name: {{ template "descheduler.fullname" . }}
-  namespace: {{ .Release.Namespace }}
-  annotations:
-    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
-    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
-spec:
-  privileged: false
-  allowPrivilegeEscalation: false
-  requiredDropCapabilities:
-    - ALL
-  volumes:
-    - 'configMap'
-    - 'secret'
-  hostNetwork: false
-  hostIPC: false
-  hostPID: false
-  runAsUser:
-    rule: 'MustRunAs'
-    ranges:
-      - min: 1
-        max: 65535
-  seLinux:
-    rule: 'RunAsAny'
-  supplementalGroups:
-    rule: 'MustRunAs'
-    ranges:
-      - min: 1
-        max: 65535
-  fsGroup:
-    rule: 'MustRunAs'
-    ranges:
-      - min: 1
-        max: 65535
-  readOnlyRootFilesystem: true
-{{- end -}}
@@ -1,30 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: descheduler-test-pod
-  namespace: {{ .Release.Namespace }}
-  annotations:
-    "helm.sh/hook": test
-spec:
-  restartPolicy: Never
-  serviceAccountName: descheduler-ci
-  containers:
-    - name: descheduler-test-container
-      image: alpine:latest
-      imagePullPolicy: IfNotPresent
-      securityContext:
-        allowPrivilegeEscalation: false
-        capabilities:
-          drop:
-            - All
-        privileged: false
-        runAsNonRoot: false
-      command: ["/bin/ash"]
-      args:
-        - -c
-        - >-
-          apk --no-cache add curl &&
-          curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl &&
-          chmod +x ./kubectl &&
-          mv ./kubectl /usr/local/bin/kubectl &&
-          /usr/local/bin/kubectl get pods --namespace {{ .Release.Namespace }} --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | grep "descheduler" | grep "Completed"
@@ -6,7 +6,7 @@
 kind: CronJob
 
 image:
-  repository: k8s.gcr.io/descheduler/descheduler
+  repository: registry.k8s.io/descheduler/descheduler
   # Overrides the image tag whose default is the chart version
   tag: ""
   pullPolicy: IfNotPresent
@@ -28,12 +28,13 @@ fullnameOverride: ""
 # labels that'll be applied to all resources
 commonLabels: {}
 
-cronJobApiVersion: "batch/v1" # Use "batch/v1beta1" for k8s version < 1.21.0. TODO(@7i) remove with 1.23 release
+cronJobApiVersion: "batch/v1"
 schedule: "*/2 * * * *"
 suspend: false
-#startingDeadlineSeconds: 200
-#successfulJobsHistoryLimit: 1
-#failedJobsHistoryLimit: 1
+# startingDeadlineSeconds: 200
+# successfulJobsHistoryLimit: 3
+# failedJobsHistoryLimit: 1
+# ttlSecondsAfterFinished 600
 
 # Required when running as a Deployment
 deschedulingInterval: 5m
@@ -46,6 +47,7 @@ replicas: 1
 
 # Specifies whether Leader Election resources should be created
 # Required when running as a Deployment
+# NOTE: Leader election can't be activated if DryRun enabled
 leaderElection: {}
 # enabled: true
 # leaseDuration: 15s
@@ -67,6 +69,12 @@ deschedulerPolicy:
   strategies:
     RemoveDuplicates:
       enabled: true
+    RemovePodsHavingTooManyRestarts:
+      enabled: true
+      params:
+        podsHavingTooManyRestarts:
+          podRestartThreshold: 100
+          includingInitContainers: true
     RemovePodsViolatingNodeTaints:
       enabled: true
     RemovePodsViolatingNodeAffinity:
@@ -76,6 +84,10 @@ deschedulerPolicy:
         - requiredDuringSchedulingIgnoredDuringExecution
     RemovePodsViolatingInterPodAntiAffinity:
       enabled: true
+    RemovePodsViolatingTopologySpreadConstraint:
+      enabled: true
+      params:
+        includeSoftConstraints: false
     LowNodeUtilization:
       enabled: true
       params:
@@ -123,10 +135,6 @@ rbac:
   # Specifies whether RBAC resources should be created
   create: true
 
-podSecurityPolicy:
-  # Specifies whether PodSecurityPolicy should be created.
-  create: true
-
 serviceAccount:
   # Specifies whether a ServiceAccount should be created
   create: true
@@ -1,7 +1,7 @@
 # See https://cloud.google.com/cloud-build/docs/build-config
 
 # this must be specified in seconds. If omitted, defaults to 600s (10 mins)
-timeout: 1500s
+timeout: 3600s
 # this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
 # or any new substitutions added in the future.
 options:
@@ -18,6 +18,8 @@ limitations under the License.
 package options
 
 import (
+	"time"
+
 	"github.com/spf13/pflag"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	apiserveroptions "k8s.io/apiserver/pkg/server/options"
@@ -27,7 +29,6 @@ import (
 	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
 	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
 	deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
-	"time"
 )
 
 const (
@@ -39,8 +40,10 @@ type DeschedulerServer struct {
 	componentconfig.DeschedulerConfiguration
 
 	Client         clientset.Interface
+	EventClient    clientset.Interface
 	SecureServing  *apiserveroptions.SecureServingOptionsWithLoopback
 	DisableMetrics bool
+	EnableHTTP2    bool
 }
 
 // NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -81,12 +84,13 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err
 
 // AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
 func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
-	fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
+	fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
 	fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
 	fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
 	fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
 	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
 	fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
+	fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
 
 	componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
 
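The Helm chart renders its `cmdOptions` map into descheduler CLI flags, so the new `--enable-http2` flag added above could plausibly be switched on from chart values with a fragment like this (an assumption for illustration; HTTP/2 stays disabled by default):

```yaml
cmdOptions:
  # Rendered as --enable-http2=true, re-enabling HTTP/2 for the metrics and
  # health-check endpoints that the flag controls.
  enable-http2: true
```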
@@ -20,6 +20,7 @@ package app
 import (
 	"context"
 	"io"
+	"os"
 	"os/signal"
 	"syscall"
 
@@ -33,9 +34,9 @@ import (
 	apiserver "k8s.io/apiserver/pkg/server"
 	"k8s.io/apiserver/pkg/server/mux"
 	restclient "k8s.io/client-go/rest"
-	"k8s.io/component-base/config"
+	registry "k8s.io/component-base/logs/api/v1"
+	jsonLog "k8s.io/component-base/logs/json"
 	_ "k8s.io/component-base/logs/json/register"
-	"k8s.io/component-base/logs/registry"
 	"k8s.io/component-base/metrics/legacyregistry"
 	"k8s.io/klog/v2"
 )
@@ -43,7 +44,6 @@ import (
 // NewDeschedulerCommand creates a *cobra.Command object with default parameters
 func NewDeschedulerCommand(out io.Writer) *cobra.Command {
 	s, err := options.NewDeschedulerServer()
-
 	if err != nil {
 		klog.ErrorS(err, "unable to initialize server")
 	}
@@ -63,11 +63,21 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
 				return
 			}
 
-			factory, _ := registry.LogRegistry.Get(s.Logging.Format)
+			s.SecureServing.DisableHTTP2 = !s.EnableHTTP2
+
+			var factory registry.LogFormatFactory
+
+			if s.Logging.Format == "json" {
+				factory = jsonLog.Factory{}
+			}
+
 			if factory == nil {
 				klog.ClearLogger()
 			} else {
-				log, logrFlush := factory.Create(config.LoggingConfiguration{})
+				log, logrFlush := factory.Create(registry.LoggingConfiguration{
+					Format:    s.Logging.Format,
+					Verbosity: s.Logging.Verbosity,
+				})
 				defer logrFlush()
 				klog.SetLogger(log)
 			}
@@ -106,3 +116,8 @@
 func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 	return descheduler.Run(ctx, rs)
 }
+
+func SetupLogs() {
+	klog.SetOutput(os.Stdout)
+	klog.InitFlags(nil)
+}
@@ -24,7 +24,7 @@ import (
 )
 
 func NewVersionCommand() *cobra.Command {
-	var versionCmd = &cobra.Command{
+	versionCmd := &cobra.Command{
 		Use:   "version",
 		Short: "Version of descheduler",
 		Long:  `Prints the version of descheduler.`,
@@ -20,13 +20,13 @@ import (
 	"os"
 
 	"k8s.io/component-base/cli"
-	"k8s.io/klog/v2"
 	"sigs.k8s.io/descheduler/cmd/descheduler/app"
+	"sigs.k8s.io/descheduler/pkg/descheduler"
 )
 
 func init() {
-	klog.SetOutput(os.Stdout)
-	klog.InitFlags(nil)
+	app.SetupLogs()
+	descheduler.SetupPlugins()
 }
 
 func main() {
@@ -31,7 +31,7 @@ View all CLI options.
 ## Run Tests
 ```
 GOOS=linux make dev-image
-kind create cluster --config hack/kind_config.yaml
+make kind-multi-node
 kind load docker-image <image name>
 kind get kubeconfig > /tmp/admin.conf
 export KUBECONFIG=/tmp/admin.conf
@@ -39,17 +39,38 @@ make test-unit
 make test-e2e
 ```
 
-## Run Helm Tests
-Run the helm test for a particular descheduler release by setting below variables,
-```
-HELM_IMAGE_REPO="descheduler"
-HELM_IMAGE_TAG="helm-test"
-HELM_CHART_LOCATION="./charts/descheduler"
-```
-The helm tests runs as part of descheduler CI. But, to run it manually from the descheduler root,
+## Format Code
+
+After making changes in the code base, ensure that the code is formatted correctly:
 
 ```
-make test-helm
+make fmt
+```
+
+## Build Helm Package locally
+
+If you made some changes in the chart, and just want to check if templating is ok, or if the chart is buildable, you can run this command to have a package built from the `./charts` directory.
+
+```
+make build-helm
+```
+
+## Lint Helm Chart locally
+
+To check linting of your changes in the helm chart locally you can run:
+
+```
+make lint-chart
+```
+
+## Test helm changes locally with kind and ct
+
+You will need kind and docker (or equivalent) installed. We can use ct public image to avoid installing ct and all its dependencies.
+
+```
+make kind-multi-node
+make ct-helm
 ```
 
 ### Miscellaneous
@@ -1,36 +1,82 @@
 # Release Guide

-## Container Image
+The process for publishing each Descheduler release includes a mixture of manual and automatic steps. Over
+time, it would be good to automate as much of this process as possible. However, due to current limitations there
+is care that must be taken to perform each manual step precisely so that the automated steps execute properly.

-### Semi-automatic
+## Pre-release Code Changes

-1. Make sure your repo is clean by git's standards
-2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
-3. Push the release branch to the descheuler repo and ensure branch protection is enabled (not required for patch releases)
-4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
-5. Publish a draft release using the tag you just created
-6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter)
-7. Publish release
-8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
+Before publishing each release, the following code updates must be made:

-### Manual
+- [ ] (Optional, but recommended) Bump `k8s.io` dependencies to the `-rc` tags. These tags are usually published around upstream code freeze. [Example](https://github.com/kubernetes-sigs/descheduler/pull/539)
+- [ ] Bump `k8s.io` dependencies to GA tags once they are published (following the upstream release). [Example](https://github.com/kubernetes-sigs/descheduler/pull/615)
+- [ ] Ensure that Go is updated to the same version as upstream. [Example](https://github.com/kubernetes-sigs/descheduler/pull/801)
+- [ ] Make CI changes in [github.com/kubernetes/test-infra](https://github.com/kubernetes/test-infra) to add the new version's tests (note, this may also include a Go bump). [Example](https://github.com/kubernetes/test-infra/pull/25833)
+- [ ] Update local CI versions for utils (such as golang-ci), kind, and go. [Example - e2e](https://github.com/kubernetes-sigs/descheduler/commit/ac4d576df8831c0c399ee8fff1e85469e90b8c44), [Example - helm](https://github.com/kubernetes-sigs/descheduler/pull/821)
+- [ ] Update version references in docs and Readme. [Example](https://github.com/kubernetes-sigs/descheduler/pull/617)

-1. Make sure your repo is clean by git's standards
-2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
-3. Push the release branch to the descheuler repo and ensure branch protection is enabled (not required for patch releases)
-4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
-5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
-6. Build and push the container image to the staging registry `VERSION=$VERSION make push-all`
-7. Publish a draft release using the tag you just created
-8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter)
-9. Publish release
-10. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
+## Release Process

-### Notes
-It's important to create the tag on the master branch after creating the release-* branch so that the [Helm releaser-action](#helm-chart) can work.
-It compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
-will be nothing to compare and it will fail (creating a new release branch usually involves no code changes). For this same reason, you should
-also tag patch releases (on the release-* branch) *after* pushing changes (if those changes involve bumping the Helm chart version).
+When the above pre-release steps are complete and the release is ready to be cut, perform the following steps **in order**
+(the flowchart below demonstrates these steps):
+
+**Version release**
+1. Create the `git tag` on `master` for the release, eg `v0.24.0`
+2. Merge Helm chart version update to `master` (see [Helm chart](#helm-chart) below). [Example](https://github.com/kubernetes-sigs/descheduler/pull/709)
+3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
+4. Cut release branch from `master`, eg `release-1.24`
+5. Publish release using Github's release process from the git tag you created
+6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
+
+**Patch release**
+1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
+2. Create the patch tag on the release branch, eg `v0.24.1` on `release-1.24`
+3. Merge Helm chart version update to release branch
+4. Perform the image promotion process for the patch version
+5. Publish release using Github's release process from the git tag you created
+6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
+
+### Flowchart
+
+
+
+### Image promotion process
+
+Every merge to any branch triggers an [image build and push](https://github.com/kubernetes/test-infra/blob/c36b8e5/config/jobs/image-pushing/k8s-staging-descheduler.yaml) to a `gcr.io` repository.
+These automated image builds are snapshots of the code in place at the time of every PR merge and
+tagged with the latest git SHA at the time of the build. To create a final release image, the desired
+auto-built image SHA is added to a [file upstream](https://github.com/kubernetes/k8s.io/blob/e9e971c/k8s.gcr.io/images/k8s-staging-descheduler/images.yaml) which
+copies that image to a public registry.
+
+Automatic builds can be monitored and re-triggered with the [`post-descheduler-push-images` job](https://prow.k8s.io/?job=post-descheduler-push-images) on prow.k8s.io.
+
+Note that images can also be manually built and pushed using `VERSION=$VERSION make push-all` by [users with access](https://github.com/kubernetes/k8s.io/blob/fbee8f67b70304241e613a672c625ad972998ad7/groups/sig-scheduling/groups.yaml#L33-L43).
+
+## Helm Chart
+We currently use the [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action) to automatically
+publish [Helm chart releases](https://github.com/kubernetes-sigs/descheduler/blob/022e07c/.github/workflows/release.yaml).
+This action is triggered when it detects any changes to [`Chart.yaml`](https://github.com/kubernetes-sigs/descheduler/blob/022e07c27853fade6d1304adc0a6ebe02642386c/charts/descheduler/Chart.yaml) on
+a `release-*` branch.
+
+Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. Example git tag name is `descheduler-helm-chart-0.18.0`.
+Released versions of the helm charts are stored in the `gh-pages` branch of this repo.
+
+The major and minor version of the chart matches the descheduler major and minor versions. For example descheduler helm chart version helm-descheduler-chart-0.18.0 corresponds
+to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match. The patch
+version of the descheduler helm chart is used to version changes specific to the helm chart.
+
+1. Merge all helm chart changes into the master branch before the release is tagged/cut
+1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix)
+2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
+2. Make sure your repo is clean by git's standards
+3. Follow the release-branch or patch release tagging pattern from the above section.
+4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
+
+## Notes
+The Helm releaser-action compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
+will be nothing to compare and it will fail. This is why it's necessary to tag, eg, `v0.24.0` *before* making the changes to the
+Helm chart version, so that there is a new diff for the action to find. (Tagging *after* making the Helm chart changes would
+also work, but then the code that gets built into the promoted image will be tagged as `descheduler-helm-chart-xxx` rather than `v0.xx.0`).

 See [post-descheduler-push-images dashboard](https://testgrid.k8s.io/sig-scheduling#post-descheduler-push-images) for staging registry image build job status.
@@ -56,19 +102,3 @@ Pull image from the staging registry.
 ```
 docker pull gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
 ```

-## Helm Chart
-Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. Example git tag name is `descheduler-helm-chart-0.18.0`.
-Released versions of the helm charts are stored in the `gh-pages` branch of this repo. The [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action)
-is setup to build and push the helm charts to the `gh-pages` branch when changes are pushed to a `release-*` branch.
-
-The major and minor version of the chart matches the descheduler major and minor versions. For example descheduler helm chart version helm-descheduler-chart-0.18.0 corresponds
-to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patcher version of the descheduler will not necessarily match. The patch
-version of the descheduler helm chart is used to version changes specific to the helm chart.
-
-1. Merge all helm chart changes into the master branch before the release is tagged/cut
-1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version(no `v` prefix)
-2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
-2. Make sure your repo is clean by git's standards
-3. Follow the release-branch or patch release tagging pattern from the above section.
-4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
docs/release-process.png (new binary file, 121 KiB, not shown)
@@ -2,25 +2,29 @@
 Starting with descheduler release v0.10.0 container images are available in the official k8s container registry.

 Descheduler Version | Container Image | Architectures |
-------------------- |--------------------------------------------|-------------------------|
-v0.24.1 | k8s.gcr.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.24.0 | k8s.gcr.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.23.1 | k8s.gcr.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
-v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
-v0.19.0 | k8s.gcr.io/descheduler/descheduler:v0.19.0 | AMD64 |
-v0.18.0 | k8s.gcr.io/descheduler/descheduler:v0.18.0 | AMD64 |
-v0.10.0 | k8s.gcr.io/descheduler/descheduler:v0.10.0 | AMD64 |
+------------------- |-------------------------------------------------|-------------------------|
+v0.26.1 | registry.k8s.io/descheduler/descheduler:v0.26.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.26.0 | registry.k8s.io/descheduler/descheduler:v0.26.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.25.1 | registry.k8s.io/descheduler/descheduler:v0.25.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.25.0 | registry.k8s.io/descheduler/descheduler:v0.25.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.24.1 | registry.k8s.io/descheduler/descheduler:v0.24.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.24.0 | registry.k8s.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.23.1 | registry.k8s.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.22.0 | registry.k8s.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.21.0 | registry.k8s.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.20.0 | registry.k8s.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
+v0.19.0 | registry.k8s.io/descheduler/descheduler:v0.19.0 | AMD64 |
+v0.18.0 | registry.k8s.io/descheduler/descheduler:v0.18.0 | AMD64 |
+v0.10.0 | registry.k8s.io/descheduler/descheduler:v0.10.0 | AMD64 |

 Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
 starting with descheduler release v0.20.0 use the below process to download the official descheduler
 image into a kind cluster.
 ```
 kind create cluster
-docker pull k8s.gcr.io/descheduler/descheduler:v0.20.0
-kind load docker-image k8s.gcr.io/descheduler/descheduler:v0.20.0
+docker pull registry.k8s.io/descheduler/descheduler:v0.20.0
+kind load docker-image registry.k8s.io/descheduler/descheduler:v0.20.0
 ```

 ## Policy Configuration Examples
@@ -60,11 +64,11 @@ Flags:
 --leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
 --leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 2s)
 --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
---log-dir string If non-empty, write log files in this directory (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+--log_dir string If non-empty, write log files in this directory (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
---log-file string If non-empty, use this log file (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+--log_file string If non-empty, use this log file (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
---log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
 --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
---logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
+--logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
 --logtostderr log to standard error instead of files (default true) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
 --one-output If true, only write logs to their native severity level (vs also writing to each lower severity level) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
 --permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
@@ -158,6 +162,7 @@ strategies:
 ```
+
 ### Autoheal Node Problems

 Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
 [Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
 [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove
@@ -166,6 +171,7 @@ There is a feature called TaintNodeByCondition of the node controller that takes
 The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
 allocation falls below the Cluster Autoscaler's scale down threshold, the Node will become a scale down candidate
 and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems.
+
 ---
 **NOTE**

|||||||
@@ -6,3 +6,6 @@ strategies:
|
|||||||
params:
|
params:
|
||||||
podLifeTime:
|
podLifeTime:
|
||||||
maxPodLifeTimeSeconds: 604800 # 7 days
|
maxPodLifeTimeSeconds: 604800 # 7 days
|
||||||
|
states:
|
||||||
|
- "Pending"
|
||||||
|
- "PodInitializing"
|
||||||
|
|||||||
@@ -4,4 +4,5 @@ strategies:
   "RemovePodsViolatingTopologySpreadConstraint":
     enabled: true
     params:
+      nodeFit: true
       includeSoftConstraints: true # Include 'ScheduleAnyways' constraints
go.mod (137 lines changed)
@@ -1,117 +1,110 @@
 module sigs.k8s.io/descheduler

-go 1.18
+go 1.19

 require (
 	github.com/client9/misspell v0.3.4
-	github.com/spf13/cobra v1.4.0
+	github.com/google/go-cmp v0.5.9
+	github.com/spf13/cobra v1.6.0
 	github.com/spf13/pflag v1.0.5
-	k8s.io/api v0.24.0
-	k8s.io/apimachinery v0.24.0
-	k8s.io/apiserver v0.24.0
-	k8s.io/client-go v0.24.0
-	k8s.io/code-generator v0.24.0
-	k8s.io/component-base v0.24.0
-	k8s.io/component-helpers v0.24.0
-	k8s.io/klog/v2 v2.60.1
-	k8s.io/kubectl v0.20.5
+	k8s.io/api v0.26.10
+	k8s.io/apimachinery v0.26.10
+	k8s.io/apiserver v0.26.10
+	k8s.io/client-go v0.26.10
+	k8s.io/code-generator v0.26.10
+	k8s.io/component-base v0.26.10
+	k8s.io/component-helpers v0.26.10
+	k8s.io/klog/v2 v2.80.1
+	k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
 	sigs.k8s.io/mdtoc v1.0.1
 )

 require (
-	cloud.google.com/go v0.81.0 // indirect
-	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
-	github.com/Azure/go-autorest/autorest v0.11.18 // indirect
-	github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
-	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
-	github.com/Azure/go-autorest/logger v0.2.1 // indirect
-	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 	github.com/BurntSushi/toml v0.3.1 // indirect
 	github.com/NYTimes/gziphandler v1.1.1 // indirect
-	github.com/PuerkitoBio/purell v1.1.1 // indirect
-	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
+	github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
+	github.com/cenkalti/backoff/v4 v4.1.3 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful v2.9.5+incompatible // indirect
+	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/felixge/httpsnoop v1.0.1 // indirect
-	github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
-	github.com/fsnotify/fsnotify v1.4.9 // indirect
-	github.com/go-logr/logr v1.2.0 // indirect
-	github.com/go-logr/zapr v1.2.0 // indirect
+	github.com/felixge/httpsnoop v1.0.3 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-logr/zapr v1.2.3 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
-	github.com/go-openapi/jsonreference v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.20.0 // indirect
 	github.com/go-openapi/swag v0.19.14 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
+	github.com/google/cel-go v0.12.7 // indirect
 	github.com/google/gnostic v0.5.7-v3refs // indirect
-	github.com/google/go-cmp v0.5.5 // indirect
 	github.com/google/gofuzz v1.1.0 // indirect
-	github.com/google/uuid v1.1.2 // indirect
+	github.com/google/uuid v1.3.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
-	github.com/imdario/mergo v0.3.5 // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
+	github.com/imdario/mergo v0.3.6 // indirect
+	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
 	github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_golang v1.12.1 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.32.1 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
-	go.etcd.io/etcd/api/v3 v3.5.1 // indirect
-	go.etcd.io/etcd/client/pkg/v3 v3.5.1 // indirect
-	go.etcd.io/etcd/client/v3 v3.5.1 // indirect
-	go.opentelemetry.io/contrib v0.20.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
-	go.opentelemetry.io/otel v0.20.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
-	go.opentelemetry.io/otel/metric v0.20.0 // indirect
-	go.opentelemetry.io/otel/sdk v0.20.0 // indirect
-	go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
-	go.opentelemetry.io/otel/trace v0.20.0 // indirect
-	go.opentelemetry.io/proto/otlp v0.7.0 // indirect
+	github.com/prometheus/client_golang v1.14.0 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/stoewer/go-strcase v1.2.0 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.5 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.5 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
+	go.opentelemetry.io/otel v1.10.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
+	go.opentelemetry.io/otel/metric v0.31.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.10.0 // indirect
+	go.opentelemetry.io/otel/trace v1.10.0 // indirect
+	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.6.0 // indirect
 	go.uber.org/zap v1.19.0 // indirect
-	golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898 // indirect
-	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
-	golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
-	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
-	golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
-	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/crypto v0.14.0 // indirect
+	golang.org/x/mod v0.9.0 // indirect
+	golang.org/x/net v0.17.0 // indirect
+	golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
+	golang.org/x/sync v0.1.0 // indirect
+	golang.org/x/sys v0.13.0 // indirect
+	golang.org/x/term v0.13.0 // indirect
+	golang.org/x/text v0.13.0 // indirect
 	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
-	golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717 // indirect
-	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	golang.org/x/tools v0.6.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
-	google.golang.org/grpc v1.40.0 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
+	google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
+	google.golang.org/grpc v1.49.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-	k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
-	k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
-	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
-	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect
-	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
-	sigs.k8s.io/yaml v1.2.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
+	k8s.io/kms v0.26.10 // indirect
+	k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37 // indirect
+	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
+	sigs.k8s.io/yaml v1.3.0 // indirect
 )
hack/lib/generator-help.sh (new file, 25 lines)
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# Utility command based on 'find' command. The pipeline is as following:
+# 1. find all the go files; (exclude specific path: vendor etc)
+# 2. find all the files containing specific tags in contents;
+# 3. extract related dirs;
+# 4. remove duplicated paths;
+# 5. merge all dirs in array with delimiter ,;
+#
+# Example:
+# find_dirs_containing_comment_tags("+k8s:")
+# Return:
+# sigs.k8s.io/descheduler/a,sigs.k8s.io/descheduler/b,sigs.k8s.io/descheduler/c
+function find_dirs_containing_comment_tags() {
+  array=()
+  while IFS='' read -r line; do array+=("$line"); done < <( \
+    find . -type f -name \*.go -not -path "./vendor/*" -not -path "./_tmp/*" -print0 \
+      | xargs -0 grep --color=never -l "$@" \
+      | xargs -n1 dirname \
+      | LC_ALL=C sort -u \
+  )
+
+  IFS=",";
+  printf '%s' "${array[*]}";
+}
@@ -1,9 +1,10 @@
 #!/bin/bash
 source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"

 go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"

 ${OS_OUTPUT_BINPATH}/conversion-gen \
   --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
+  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")" \
   --output-file-base zz_generated.conversion
@@ -1,10 +1,11 @@
 #!/bin/bash
 source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"

 go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"

 ${OS_OUTPUT_BINPATH}/deepcopy-gen \
   --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig,${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api,${PRJ_PREFIX}/pkg/api/v1alpha1" \
+  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")" \
   --output-file-base zz_generated.deepcopy

@@ -1,10 +1,11 @@
 #!/bin/bash
 source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"

 go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"

 ${OS_OUTPUT_BINPATH}/defaulter-gen \
   --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
+  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")" \
   --extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
   --output-file-base zz_generated.defaults
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

 GO_VERSION=($(go version))

-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi
hack/verify-chart.sh (new executable file, 1 line)
@@ -0,0 +1 @@
+${CONTAINER_ENGINE:-docker} run -it --rm --network host --workdir=/data --volume ~/.kube/config:/root/.kube/config:ro --volume $(pwd):/data quay.io/helmpack/chart-testing:v3.7.0 /bin/bash -c "git config --global --add safe.directory /data; ct install --config=.github/ci/ct.yaml --helm-extra-set-args=\"--set=kind=Deployment\""
@@ -16,12 +16,7 @@ git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_des
 _deschedulertmp="${_deschedulertmp}/descheduler"

 pushd "${_deschedulertmp}" > /dev/null 2>&1
-go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
-
-${OS_OUTPUT_BINPATH}/conversion-gen \
-  --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "./pkg/apis/componentconfig/v1alpha1,./pkg/api/v1alpha1" \
-  --output-file-base zz_generated.conversion
+./hack/update-generated-conversions.sh
 popd > /dev/null 2>&1

 pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
@@ -16,12 +16,7 @@ git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_des
 _deschedulertmp="${_deschedulertmp}/descheduler"

 pushd "${_deschedulertmp}" > /dev/null 2>&1
-go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
-
-${OS_OUTPUT_BINPATH}/deepcopy-gen \
-  --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "./pkg/apis/componentconfig,./pkg/apis/componentconfig/v1alpha1,./pkg/api,./pkg/api/v1alpha1" \
-  --output-file-base zz_generated.deepcopy
+./hack/update-generated-deep-copies.sh
 popd > /dev/null 2>&1

 pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
@@ -15,13 +15,7 @@ git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_des
 _deschedulertmp="${_deschedulertmp}/descheduler"

 pushd "${_deschedulertmp}" > /dev/null 2>&1
-go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
-
-${OS_OUTPUT_BINPATH}/defaulter-gen \
-  --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
-  --extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
-  --output-file-base zz_generated.defaults
+./hack/update-generated-defaulters.sh
 popd > /dev/null 2>&1

 pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

 GO_VERSION=($(go version))

-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi
@@ -4,7 +4,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: descheduler-cluster-role
 rules:
-- apiGroups: [""]
+- apiGroups: ["events.k8s.io"]
   resources: ["events"]
   verbs: ["create", "update"]
 - apiGroups: [""]
@@ -1,5 +1,5 @@
 ---
-apiVersion: batch/v1 # for k8s version < 1.21.0, use batch/v1beta1
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: descheduler-cronjob
@@ -16,7 +16,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
       - name: descheduler
-        image: k8s.gcr.io/descheduler/descheduler:v0.24.1
+        image: registry.k8s.io/descheduler/descheduler:v0.26.1
         volumeMounts:
         - mountPath: /policy-dir
           name: policy-volume
@@ -19,7 +19,7 @@ spec:
       serviceAccountName: descheduler-sa
       containers:
      - name: descheduler
-        image: k8s.gcr.io/descheduler/descheduler:v0.24.1
+        image: registry.k8s.io/descheduler/descheduler:v0.26.1
        imagePullPolicy: IfNotPresent
        command:
        - "/bin/descheduler"
@@ -14,7 +14,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
       - name: descheduler
-        image: k8s.gcr.io/descheduler/descheduler:v0.24.1
+        image: registry.k8s.io/descheduler/descheduler:v0.26.1
         volumeMounts:
         - mountPath: /policy-dir
           name: policy-volume
@@ -43,7 +43,7 @@ var (
 			Subsystem:      DeschedulerSubsystem,
 			Name:           "build_info",
 			Help:           "Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch",
-			ConstLabels:    map[string]string{"GoVersion": version.Get().GoVersion, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1},
+			ConstLabels:    map[string]string{"GoVersion": version.Get().GoVersion, "AppVersion": version.Get().Major + "." + version.Get().Minor, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1},
 			StabilityLevel: metrics.ALPHA,
 		},
 	)
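For context, `build_info` is a constant-label metric: all of the interesting information is baked into labels at construction time and the gauge value itself is simply set to 1. A hedged sketch of the same pattern with `k8s.io/component-base/metrics` (the variable name, subsystem string, and label values below are illustrative, not the descheduler's actual registration code):

```go
package main

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

// buildInfo mirrors the shape of the metric above: every datum travels in a
// constant label, while the sample value stays at 1.
var buildInfo = metrics.NewGauge(&metrics.GaugeOpts{
	Subsystem:      "descheduler",
	Name:           "build_info",
	Help:           "Build info about descheduler",
	ConstLabels:    map[string]string{"AppVersion": "0.26", "GoVersion": "go1.19"},
	StabilityLevel: metrics.ALPHA,
})

func main() {
	legacyregistry.MustRegister(buildInfo)
	buildInfo.Set(1)
}
```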
pkg/api/sort.go (new file, 23 lines)
@@ -0,0 +1,23 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import "sort"
+
+func SortProfilesByName(profiles []Profile) []Profile {
+	sort.Slice(profiles, func(i, j int) bool {
+		return profiles[i].Name < profiles[j].Name
+	})
+	return profiles
+}
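`SortProfilesByName` sorts the slice in place and also returns it, so it can be used inline. A small usage sketch (assuming the package import path `sigs.k8s.io/descheduler/pkg/api`; the profile names are made up):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
)

func main() {
	profiles := []api.Profile{{Name: "lifecycle"}, {Name: "balance"}}
	// The slice is reordered in place; the return value is the same slice.
	for _, p := range api.SortProfilesByName(profiles) {
		fmt.Println(p.Name) // prints "balance", then "lifecycle"
	}
}
```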
@@ -19,6 +19,7 @@ package api
 import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -26,24 +27,12 @@ import (
 type DeschedulerPolicy struct {
 	metav1.TypeMeta

-	// Strategies
-	Strategies StrategyList
+	// Profiles
+	Profiles []Profile

 	// NodeSelector for a set of nodes to operate over
 	NodeSelector *string

-	// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
-	EvictFailedBarePods *bool
-
-	// EvictLocalStoragePods allows pods using local storage to be evicted.
-	EvictLocalStoragePods *bool
-
-	// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
-	EvictSystemCriticalPods *bool
-
-	// IgnorePVCPods prevents pods with PVCs from being evicted.
-	IgnorePVCPods *bool
-
 	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
 	MaxNoOfPodsToEvictPerNode *uint

@@ -51,20 +40,6 @@ type DeschedulerPolicy struct {
 	MaxNoOfPodsToEvictPerNamespace *uint
 }

-type StrategyName string
-type StrategyList map[StrategyName]DeschedulerStrategy
-
-type DeschedulerStrategy struct {
-	// Enabled or disabled
-	Enabled bool
-
-	// Weight
-	Weight int
-
-	// Strategy parameters
-	Params *StrategyParameters
-}
-
 // Namespaces carries a list of included/excluded namespaces
 // for which a given strategy is applicable
 type Namespaces struct {

@@ -72,53 +47,38 @@ type Namespaces struct {
 	Exclude []string
 }

-// Besides Namespaces only one of its members may be specified
-// TODO(jchaloup): move Namespaces ThresholdPriority and ThresholdPriorityClassName to individual strategies
-// once the policy version is bumped to v1alpha2
-type StrategyParameters struct {
-	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds
-	NodeAffinityType                  []string
-	PodsHavingTooManyRestarts         *PodsHavingTooManyRestarts
-	PodLifeTime                       *PodLifeTime
-	RemoveDuplicates                  *RemoveDuplicates
-	FailedPods                        *FailedPods
-	IncludeSoftConstraints            bool
-	Namespaces                        *Namespaces
-	ThresholdPriority                 *int32
-	ThresholdPriorityClassName        string
-	LabelSelector                     *metav1.LabelSelector
-	NodeFit                           bool
-	IncludePreferNoSchedule           bool
-	ExcludedTaints                    []string
+type (
+	Percentage         float64
+	ResourceThresholds map[v1.ResourceName]Percentage
+)
+
+type PriorityThreshold struct {
+	Value *int32
+	Name  string
 }

-type Percentage float64
-type ResourceThresholds map[v1.ResourceName]Percentage
-
-type NodeResourceUtilizationThresholds struct {
-	UseDeviationThresholds bool
-	Thresholds             ResourceThresholds
-	TargetThresholds       ResourceThresholds
-	NumberOfNodes          int
+type Profile struct {
+	Name          string
+	PluginConfigs []PluginConfig
+	Plugins       Plugins
 }

-type PodsHavingTooManyRestarts struct {
-	PodRestartThreshold     int32
-	IncludingInitContainers bool
+type PluginConfig struct {
+	Name string
+	Args runtime.Object
 }

-type RemoveDuplicates struct {
-	ExcludeOwnerKinds []string
+type Plugins struct {
+	PreSort           PluginSet
+	Sort              PluginSet
+	Deschedule        PluginSet
+	Balance           PluginSet
+	Evict             PluginSet
+	Filter            PluginSet
+	PreEvictionFilter PluginSet
 }

-type PodLifeTime struct {
-	MaxPodLifeTimeSeconds *uint
-	PodStatusPhases       []string
-}
-
-type FailedPods struct {
-	ExcludeOwnerKinds       []string
-	MinPodLifetimeSeconds   *uint
-	Reasons                 []string
-	IncludingInitContainers bool
+type PluginSet struct {
+	Enabled  []string
+	Disabled []string
 }
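The internal policy type now groups configuration into profiles, each carrying per-extension-point plugin sets instead of the old strategy map. A hedged sketch of populating the new shape (field names come from the types above; the profile name, plugin names, and values are illustrative only):

```go
package main

import "sigs.k8s.io/descheduler/pkg/api"

func main() {
	maxPerNode := uint(5)

	policy := api.DeschedulerPolicy{
		Profiles: []api.Profile{
			{
				Name: "default",
				Plugins: api.Plugins{
					// Enable plugins at the extension points they belong to;
					// the names here are placeholders, not a canonical list.
					Deschedule: api.PluginSet{Enabled: []string{"PodLifeTime"}},
					Balance:    api.PluginSet{Enabled: []string{"RemoveDuplicates"}},
				},
			},
		},
		MaxNoOfPodsToEvictPerNode: &maxPerNode,
	}
	_ = policy
}
```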
@@ -15,7 +15,6 @@ limitations under the License.
 */

 // +k8s:deepcopy-gen=package,register
-// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/api
 // +k8s:defaulter-gen=TypeMeta

 // Package v1alpha1 is the v1alpha1 version of the descheduler API
@@ -28,8 +28,10 @@ var (
 )

 // GroupName is the group name used in this package
-const GroupName = "descheduler"
-const GroupVersion = "v1alpha1"
+const (
+	GroupName    = "descheduler"
+	GroupVersion = "v1alpha1"
+)

 // SchemeGroupVersion is group version used to register these objects
 var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
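Grouping the two constants into one block does not change how they are consumed; `SchemeGroupVersion` still composes them. A tiny illustrative check (the import path `sigs.k8s.io/descheduler/pkg/api/v1alpha1` is an assumption):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
	// Prints "descheduler/v1alpha1" and the fully qualified kind.
	fmt.Println(v1alpha1.SchemeGroupVersion.String())
	fmt.Println(v1alpha1.SchemeGroupVersion.WithKind("DeschedulerPolicy"))
}
```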
@@ -45,14 +45,16 @@ type DeschedulerPolicy struct {
 	IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`

 	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
-	MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
+	MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`

 	// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
-	MaxNoOfPodsToEvictPerNamespace *int `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
+	MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
 }

-type StrategyName string
-type StrategyList map[StrategyName]DeschedulerStrategy
+type (
+	StrategyName string
+	StrategyList map[StrategyName]DeschedulerStrategy
+)

 type DeschedulerStrategy struct {
 	// Enabled or disabled
@@ -90,8 +92,10 @@ type StrategyParameters struct {
|
|||||||
ExcludedTaints []string `json:"excludedTaints,omitempty"`
|
ExcludedTaints []string `json:"excludedTaints,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Percentage float64
|
type (
|
||||||
type ResourceThresholds map[v1.ResourceName]Percentage
|
Percentage float64
|
||||||
|
ResourceThresholds map[v1.ResourceName]Percentage
|
||||||
|
)
|
||||||
|
|
||||||
type NodeResourceUtilizationThresholds struct {
|
type NodeResourceUtilizationThresholds struct {
|
||||||
UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
|
UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
|
||||||
@@ -111,7 +115,10 @@ type RemoveDuplicates struct {
|
|||||||
|
|
||||||
type PodLifeTime struct {
|
type PodLifeTime struct {
|
||||||
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
|
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
|
||||||
PodStatusPhases []string `json:"podStatusPhases,omitempty"`
|
States []string `json:"states,omitempty"`
|
||||||
|
|
||||||
|
// Deprecated: Use States instead.
|
||||||
|
PodStatusPhases []string `json:"podStatusPhases,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type FailedPods struct {
|
type FailedPods struct {
|
||||||
|
|||||||
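The hunk above switches both eviction limits from *int to *uint and adds PodLifeTime.States next to the now-deprecated PodStatusPhases. A minimal sketch of populating the updated fields follows; it assumes the v1alpha1 package lives at sigs.k8s.io/descheduler/pkg/api/v1alpha1 (the path implied by the conversion-gen tag earlier), which is not stated in this diff.

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

// uintPtr is a small helper; the limits are now *uint, so negative values can no longer be expressed.
func uintPtr(v uint) *uint { return &v }

func main() {
	policy := v1alpha1.DeschedulerPolicy{
		MaxNoOfPodsToEvictPerNode:      uintPtr(5),
		MaxNoOfPodsToEvictPerNamespace: uintPtr(10),
		Strategies: v1alpha1.StrategyList{
			"PodLifeTime": v1alpha1.DeschedulerStrategy{
				Enabled: true,
				Params: &v1alpha1.StrategyParameters{
					PodLifeTime: &v1alpha1.PodLifeTime{
						MaxPodLifeTimeSeconds: uintPtr(86400),
						// States replaces the deprecated PodStatusPhases field.
						States: []string{"Pending", "Running"},
					},
				},
			},
		},
	}
	fmt.Printf("%+v\n", policy)
}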
@@ -1,397 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	unsafe "unsafe"
-
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	conversion "k8s.io/apimachinery/pkg/conversion"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	api "sigs.k8s.io/descheduler/pkg/api"
-)
-
-func init() {
-	localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
-	if err := s.AddGeneratedConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*DeschedulerStrategy)(nil), (*api.DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(a.(*DeschedulerStrategy), b.(*api.DeschedulerStrategy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.DeschedulerStrategy)(nil), (*DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(a.(*api.DeschedulerStrategy), b.(*DeschedulerStrategy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*FailedPods)(nil), (*api.FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_FailedPods_To_api_FailedPods(a.(*FailedPods), b.(*api.FailedPods), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.FailedPods)(nil), (*FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_FailedPods_To_v1alpha1_FailedPods(a.(*api.FailedPods), b.(*FailedPods), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.Namespaces)(nil), (*Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_Namespaces_To_v1alpha1_Namespaces(a.(*api.Namespaces), b.(*Namespaces), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*NodeResourceUtilizationThresholds)(nil), (*api.NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(a.(*NodeResourceUtilizationThresholds), b.(*api.NodeResourceUtilizationThresholds), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.NodeResourceUtilizationThresholds)(nil), (*NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(a.(*api.NodeResourceUtilizationThresholds), b.(*NodeResourceUtilizationThresholds), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*PodLifeTime)(nil), (*api.PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(a.(*PodLifeTime), b.(*api.PodLifeTime), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.PodLifeTime)(nil), (*PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(a.(*api.PodLifeTime), b.(*PodLifeTime), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*PodsHavingTooManyRestarts)(nil), (*api.PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(a.(*PodsHavingTooManyRestarts), b.(*api.PodsHavingTooManyRestarts), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.PodsHavingTooManyRestarts)(nil), (*PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(a.(*api.PodsHavingTooManyRestarts), b.(*PodsHavingTooManyRestarts), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*RemoveDuplicates)(nil), (*api.RemoveDuplicates)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(a.(*RemoveDuplicates), b.(*api.RemoveDuplicates), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.RemoveDuplicates)(nil), (*RemoveDuplicates)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(a.(*api.RemoveDuplicates), b.(*RemoveDuplicates), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*StrategyParameters)(nil), (*api.StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(a.(*StrategyParameters), b.(*api.StrategyParameters), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.StrategyParameters)(nil), (*StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(a.(*api.StrategyParameters), b.(*StrategyParameters), scope)
-	}); err != nil {
-		return err
-	}
-	return nil
-}
-
-func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
-	out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
-	out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
-	out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
-	out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
-	out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
-	out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
-	if in.MaxNoOfPodsToEvictPerNode != nil {
-		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
-		*out = new(uint)
-		**out = uint(**in)
-	} else {
-		out.MaxNoOfPodsToEvictPerNode = nil
-	}
-	if in.MaxNoOfPodsToEvictPerNamespace != nil {
-		in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
-		*out = new(uint)
-		**out = uint(**in)
-	} else {
-		out.MaxNoOfPodsToEvictPerNamespace = nil
-	}
-	return nil
-}
-
-// Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy is an autogenerated conversion function.
-func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
-	return autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in, out, s)
-}
-
-func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
-	out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
-	out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
-	out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
-	out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
-	out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
-	out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
-	if in.MaxNoOfPodsToEvictPerNode != nil {
-		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
-		*out = new(int)
-		**out = int(**in)
-	} else {
-		out.MaxNoOfPodsToEvictPerNode = nil
-	}
-	if in.MaxNoOfPodsToEvictPerNamespace != nil {
-		in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
-		*out = new(int)
-		**out = int(**in)
-	} else {
-		out.MaxNoOfPodsToEvictPerNamespace = nil
-	}
-	return nil
-}
-
-// Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy is an autogenerated conversion function.
-func Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
-	return autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in, out, s)
-}
-
-func autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
-	out.Enabled = in.Enabled
-	out.Weight = in.Weight
-	out.Params = (*api.StrategyParameters)(unsafe.Pointer(in.Params))
-	return nil
-}
-
-// Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy is an autogenerated conversion function.
-func Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
-	return autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in, out, s)
-}
-
-func autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
-	out.Enabled = in.Enabled
-	out.Weight = in.Weight
-	out.Params = (*StrategyParameters)(unsafe.Pointer(in.Params))
-	return nil
-}
-
-// Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy is an autogenerated conversion function.
-func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
-	return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
-}
-
-func autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
-	out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
-	out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
-	out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
-	out.IncludingInitContainers = in.IncludingInitContainers
-	return nil
-}
-
-// Convert_v1alpha1_FailedPods_To_api_FailedPods is an autogenerated conversion function.
-func Convert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
-	return autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in, out, s)
-}
-
-func autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
-	out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
-	out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
-	out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
-	out.IncludingInitContainers = in.IncludingInitContainers
-	return nil
-}
-
-// Convert_api_FailedPods_To_v1alpha1_FailedPods is an autogenerated conversion function.
-func Convert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
-	return autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in, out, s)
-}
-
-func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
-	out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
-	out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
-	return nil
-}
-
-// Convert_v1alpha1_Namespaces_To_api_Namespaces is an autogenerated conversion function.
-func Convert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
-	return autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in, out, s)
-}
-
-func autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
-	out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
-	out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
-	return nil
-}
-
-// Convert_api_Namespaces_To_v1alpha1_Namespaces is an autogenerated conversion function.
-func Convert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
-	return autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in, out, s)
-}
-
-func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
-	out.UseDeviationThresholds = in.UseDeviationThresholds
-	out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
-	out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
-	out.NumberOfNodes = in.NumberOfNodes
-	return nil
-}
-
-// Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds is an autogenerated conversion function.
-func Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
-	return autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in, out, s)
-}
-
-func autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
-	out.UseDeviationThresholds = in.UseDeviationThresholds
-	out.Thresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
-	out.TargetThresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
-	out.NumberOfNodes = in.NumberOfNodes
-	return nil
-}
-
-// Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds is an autogenerated conversion function.
-func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
-	return autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in, out, s)
-}
-
-func autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
-	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
-	out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
-	return nil
-}
-
-// Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime is an autogenerated conversion function.
-func Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
-	return autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in, out, s)
-}
-
-func autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
-	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
-	out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
-	return nil
-}
-
-// Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime is an autogenerated conversion function.
-func Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
-	return autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in, out, s)
-}
-
-func autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
-	out.PodRestartThreshold = in.PodRestartThreshold
-	out.IncludingInitContainers = in.IncludingInitContainers
-	return nil
-}
-
-// Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts is an autogenerated conversion function.
-func Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
-	return autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in, out, s)
-}
-
-func autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
-	out.PodRestartThreshold = in.PodRestartThreshold
-	out.IncludingInitContainers = in.IncludingInitContainers
-	return nil
-}
-
-// Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts is an autogenerated conversion function.
-func Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
-	return autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in, out, s)
-}
-
-func autoConvert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in *RemoveDuplicates, out *api.RemoveDuplicates, s conversion.Scope) error {
-	out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
-	return nil
-}
-
-// Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates is an autogenerated conversion function.
-func Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in *RemoveDuplicates, out *api.RemoveDuplicates, s conversion.Scope) error {
-	return autoConvert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in, out, s)
-}
-
-func autoConvert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in *api.RemoveDuplicates, out *RemoveDuplicates, s conversion.Scope) error {
-	out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
-	return nil
-}
-
-// Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates is an autogenerated conversion function.
-func Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in *api.RemoveDuplicates, out *RemoveDuplicates, s conversion.Scope) error {
-	return autoConvert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in, out, s)
-}
-
-func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
-	out.NodeResourceUtilizationThresholds = (*api.NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
-	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
-	out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
-	out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
-	out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
-	out.FailedPods = (*api.FailedPods)(unsafe.Pointer(in.FailedPods))
-	out.IncludeSoftConstraints = in.IncludeSoftConstraints
-	out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
-	out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
-	out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
-	out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
-	out.NodeFit = in.NodeFit
-	out.IncludePreferNoSchedule = in.IncludePreferNoSchedule
-	out.ExcludedTaints = *(*[]string)(unsafe.Pointer(&in.ExcludedTaints))
-	return nil
-}
-
-// Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters is an autogenerated conversion function.
-func Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
-	return autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in, out, s)
-}
-
-func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
-	out.NodeResourceUtilizationThresholds = (*NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
-	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
-	out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
-	out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
-	out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
-	out.FailedPods = (*FailedPods)(unsafe.Pointer(in.FailedPods))
-	out.IncludeSoftConstraints = in.IncludeSoftConstraints
-	out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
-	out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
-	out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
-	out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
-	out.NodeFit = in.NodeFit
-	out.IncludePreferNoSchedule = in.IncludePreferNoSchedule
-	out.ExcludedTaints = *(*[]string)(unsafe.Pointer(&in.ExcludedTaints))
-	return nil
-}
-
-// Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters is an autogenerated conversion function.
-func Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
-	return autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in, out, s)
-}
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2022 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -64,12 +64,12 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 	}
 	if in.MaxNoOfPodsToEvictPerNode != nil {
 		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
-		*out = new(int)
+		*out = new(uint)
 		**out = **in
 	}
 	if in.MaxNoOfPodsToEvictPerNamespace != nil {
 		in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
-		*out = new(int)
+		*out = new(uint)
 		**out = **in
 	}
 	return
@@ -209,6 +209,11 @@ func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
 		*out = new(uint)
 		**out = **in
 	}
+	if in.States != nil {
+		in, out := &in.States, &out.States
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	if in.PodStatusPhases != nil {
 		in, out := &in.PodStatusPhases, &out.PodStatusPhases
 		*out = make([]string, len(*in))
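With the added States handling above, a DeepCopy of PodLifeTime now duplicates the new slice as well, so mutating the copy leaves the original untouched. A small sketch under the same package-path assumption as the earlier example:

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
	orig := &v1alpha1.PodLifeTime{States: []string{"Running"}}
	copied := orig.DeepCopy()

	// The copy owns its own backing array for States...
	copied.States[0] = "Pending"

	// ...so the original still prints "Running".
	fmt.Println(orig.States[0])
}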
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2022 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2022 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -22,7 +22,6 @@ limitations under the License.
 package api

 import (
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )

@@ -30,11 +29,11 @@ import (
 func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
-	if in.Strategies != nil {
-		in, out := &in.Strategies, &out.Strategies
-		*out = make(StrategyList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
+	if in.Profiles != nil {
+		in, out := &in.Profiles, &out.Profiles
+		*out = make([]Profile, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
 	if in.NodeSelector != nil {
@@ -42,26 +41,6 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 		*out = new(string)
 		**out = **in
 	}
-	if in.EvictFailedBarePods != nil {
-		in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
-		*out = new(bool)
-		**out = **in
-	}
-	if in.EvictLocalStoragePods != nil {
-		in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
-		*out = new(bool)
-		**out = **in
-	}
-	if in.EvictSystemCriticalPods != nil {
-		in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
-		*out = new(bool)
-		**out = **in
-	}
-	if in.IgnorePVCPods != nil {
-		in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
-		*out = new(bool)
-		**out = **in
-	}
 	if in.MaxNoOfPodsToEvictPerNode != nil {
 		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
 		*out = new(uint)
@@ -93,58 +72,6 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
 	return nil
 }

-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
-	*out = *in
-	if in.Params != nil {
-		in, out := &in.Params, &out.Params
-		*out = new(StrategyParameters)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
-func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
-	if in == nil {
-		return nil
-	}
-	out := new(DeschedulerStrategy)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FailedPods) DeepCopyInto(out *FailedPods) {
-	*out = *in
-	if in.ExcludeOwnerKinds != nil {
-		in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.MinPodLifetimeSeconds != nil {
-		in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
-		*out = new(uint)
-		**out = **in
-	}
-	if in.Reasons != nil {
-		in, out := &in.Reasons, &out.Reasons
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
-func (in *FailedPods) DeepCopy() *FailedPods {
-	if in == nil {
-		return nil
-	}
-	out := new(FailedPods)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Namespaces) DeepCopyInto(out *Namespaces) {
 	*out = *in
@@ -172,94 +99,114 @@ func (in *Namespaces) DeepCopy() *Namespaces {
 }

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
+func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
 	*out = *in
-	if in.Thresholds != nil {
-		in, out := &in.Thresholds, &out.Thresholds
-		*out = make(ResourceThresholds, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.TargetThresholds != nil {
-		in, out := &in.TargetThresholds, &out.TargetThresholds
-		*out = make(ResourceThresholds, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
+	if in.Args != nil {
+		out.Args = in.Args.DeepCopyObject()
 	}
 	return
 }

-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
-func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig.
+func (in *PluginConfig) DeepCopy() *PluginConfig {
 	if in == nil {
 		return nil
 	}
-	out := new(NodeResourceUtilizationThresholds)
+	out := new(PluginConfig)
 	in.DeepCopyInto(out)
 	return out
 }

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
+func (in *PluginSet) DeepCopyInto(out *PluginSet) {
 	*out = *in
-	if in.MaxPodLifeTimeSeconds != nil {
-		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
-		*out = new(uint)
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet.
+func (in *PluginSet) DeepCopy() *PluginSet {
+	if in == nil {
+		return nil
+	}
+	out := new(PluginSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Plugins) DeepCopyInto(out *Plugins) {
+	*out = *in
+	in.PreSort.DeepCopyInto(&out.PreSort)
+	in.Sort.DeepCopyInto(&out.Sort)
+	in.Deschedule.DeepCopyInto(&out.Deschedule)
+	in.Balance.DeepCopyInto(&out.Balance)
+	in.Evict.DeepCopyInto(&out.Evict)
+	in.Filter.DeepCopyInto(&out.Filter)
+	in.PreEvictionFilter.DeepCopyInto(&out.PreEvictionFilter)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins.
+func (in *Plugins) DeepCopy() *Plugins {
+	if in == nil {
+		return nil
+	}
+	out := new(Plugins)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PriorityThreshold) DeepCopyInto(out *PriorityThreshold) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(int32)
 		**out = **in
 	}
-	if in.PodStatusPhases != nil {
-		in, out := &in.PodStatusPhases, &out.PodStatusPhases
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
 	return
 }

-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
-func (in *PodLifeTime) DeepCopy() *PodLifeTime {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityThreshold.
+func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
 	if in == nil {
 		return nil
 	}
-	out := new(PodLifeTime)
+	out := new(PriorityThreshold)
 	in.DeepCopyInto(out)
 	return out
 }

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
+func (in *Profile) DeepCopyInto(out *Profile) {
 	*out = *in
+	if in.PluginConfigs != nil {
+		in, out := &in.PluginConfigs, &out.PluginConfigs
+		*out = make([]PluginConfig, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Plugins.DeepCopyInto(&out.Plugins)
 	return
 }

-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
-func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Profile.
+func (in *Profile) DeepCopy() *Profile {
 	if in == nil {
 		return nil
 	}
-	out := new(PodsHavingTooManyRestarts)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
-	*out = *in
-	if in.ExcludeOwnerKinds != nil {
-		in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
-func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
-	if in == nil {
-		return nil
-	}
-	out := new(RemoveDuplicates)
+	out := new(Profile)
 	in.DeepCopyInto(out)
 	return out
 }
@@ -285,91 +232,3 @@ func (in ResourceThresholds) DeepCopy() ResourceThresholds {
 	in.DeepCopyInto(out)
 	return *out
 }
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in StrategyList) DeepCopyInto(out *StrategyList) {
-	{
-		in := &in
-		*out = make(StrategyList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-		return
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
-func (in StrategyList) DeepCopy() StrategyList {
-	if in == nil {
-		return nil
-	}
-	out := new(StrategyList)
-	in.DeepCopyInto(out)
-	return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
-	*out = *in
-	if in.NodeResourceUtilizationThresholds != nil {
-		in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
-		*out = new(NodeResourceUtilizationThresholds)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.NodeAffinityType != nil {
-		in, out := &in.NodeAffinityType, &out.NodeAffinityType
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.PodsHavingTooManyRestarts != nil {
-		in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
-		*out = new(PodsHavingTooManyRestarts)
-		**out = **in
-	}
-	if in.PodLifeTime != nil {
-		in, out := &in.PodLifeTime, &out.PodLifeTime
-		*out = new(PodLifeTime)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.RemoveDuplicates != nil {
-		in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
-		*out = new(RemoveDuplicates)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.FailedPods != nil {
-		in, out := &in.FailedPods, &out.FailedPods
-		*out = new(FailedPods)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Namespaces != nil {
-		in, out := &in.Namespaces, &out.Namespaces
-		*out = new(Namespaces)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.ThresholdPriority != nil {
-		in, out := &in.ThresholdPriority, &out.ThresholdPriority
-		*out = new(int32)
-		**out = **in
-	}
-	if in.LabelSelector != nil {
-		in, out := &in.LabelSelector, &out.LabelSelector
-		*out = new(v1.LabelSelector)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.ExcludedTaints != nil {
-		in, out := &in.ExcludedTaints, &out.ExcludedTaints
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
-func (in *StrategyParameters) DeepCopy() *StrategyParameters {
-	if in == nil {
-		return nil
-	}
-	out := new(StrategyParameters)
-	in.DeepCopyInto(out)
-	return out
-}
@@ -21,6 +21,7 @@ import (

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	componentbaseconfig "k8s.io/component-base/config"
+	registry "k8s.io/component-base/logs/api/v1"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -57,6 +58,6 @@ type DeschedulerConfiguration struct {
 	LeaderElection componentbaseconfig.LeaderElectionConfiguration

 	// Logging specifies the options of logging.
-	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
-	Logging componentbaseconfig.LoggingConfiguration
+	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
+	Logging registry.LoggingConfiguration
 }
@@ -28,8 +28,10 @@ var (
 )

 // GroupName is the group name use in this package
-const GroupName = "deschedulercomponentconfig"
-const GroupVersion = "v1alpha1"
+const (
+	GroupName = "deschedulercomponentconfig"
+	GroupVersion = "v1alpha1"
+)

 // SchemeGroupVersion is group version used to register these objects
 var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
@@ -19,9 +19,9 @@ package v1alpha1
 import (
 	"time"

-	componentbaseconfig "k8s.io/component-base/config"
-
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	componentbaseconfig "k8s.io/component-base/config"
+	registry "k8s.io/component-base/logs/api/v1"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -58,6 +58,6 @@ type DeschedulerConfiguration struct {
 	LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`

 	// Logging specifies the options of logging.
-	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
-	Logging componentbaseconfig.LoggingConfiguration `json:"logging,omitempty"`
+	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
+	Logging registry.LoggingConfiguration `json:"logging,omitempty"`
 }
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2022 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2022 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2022 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2022 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 )

-func CreateClient(kubeconfig string) (clientset.Interface, error) {
+func CreateClient(kubeconfig, userAgt string) (clientset.Interface, error) {
 	var cfg *rest.Config
 	if len(kubeconfig) != 0 {
 		master, err := GetMasterFromKubeconfig(kubeconfig)
@@ -47,7 +47,11 @@ func CreateClient(kubeconfig string) (clientset.Interface, error) {
 		}
 	}

-	return clientset.NewForConfig(cfg)
+	if len(userAgt) != 0 {
+		return clientset.NewForConfig(rest.AddUserAgent(cfg, userAgt))
+	} else {
+		return clientset.NewForConfig(cfg)
+	}
 }

 func GetMasterFromKubeconfig(filename string) (string, error) {
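CreateClient now takes a user-agent string in addition to the kubeconfig path; an empty string keeps the previous behaviour. A short usage sketch, assuming the helper lives in sigs.k8s.io/descheduler/pkg/descheduler/client and using a hypothetical kubeconfig path:

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/descheduler/client"
)

func main() {
	// "descheduler" tags every API request made through this client set.
	clientSet, err := client.CreateClient("/path/to/kubeconfig", "descheduler")
	if err != nil {
		fmt.Println("failed to build client:", err)
		return
	}
	fmt.Printf("client ready: %T\n", clientSet)
}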
@@ -20,8 +20,10 @@ import (
 	"context"
 	"fmt"

+	"k8s.io/klog/v2"
+
 	v1 "k8s.io/api/core/v1"
-	policy "k8s.io/api/policy/v1beta1"
+	policy "k8s.io/api/policy/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -29,11 +31,9 @@ import (
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
+	listersv1 "k8s.io/client-go/listers/core/v1"
+	schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
 	core "k8s.io/client-go/testing"
-	"k8s.io/klog/v2"
-
-	corev1informers "k8s.io/client-go/informers/core/v1"
-	schedulingv1informers "k8s.io/client-go/informers/scheduling/v1"

 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/metrics"
@@ -43,20 +43,23 @@ import (
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
-	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
+	"sigs.k8s.io/descheduler/pkg/framework"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )

 func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 	metrics.Register()

-	rsclient, err := client.CreateClient(rs.KubeconfigFile)
+	rsclient, eventClient, err := createClients(rs.KubeconfigFile)
 	if err != nil {
 		return err
 	}
 	rs.Client = rsclient
+	rs.EventClient = eventClient

-	deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile)
+	deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile, rs.Client, pluginbuilder.PluginRegistry)
 	if err != nil {
 		return err
 	}
@@ -77,6 +80,10 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 		return fmt.Errorf("leaderElection must be used with deschedulingInterval")
 	}

+	if rs.LeaderElection.LeaderElect && rs.DryRun {
+		klog.V(1).InfoS("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
+	}
+
 	if rs.LeaderElection.LeaderElect && !rs.DryRun {
 		if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
 			return fmt.Errorf("leaderElection: %w", err)
@@ -87,14 +94,12 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 	return runFn()
 }

-type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
-
 func cachedClient(
 	realClient clientset.Interface,
-	podInformer corev1informers.PodInformer,
-	nodeInformer corev1informers.NodeInformer,
-	namespaceInformer corev1informers.NamespaceInformer,
-	priorityClassInformer schedulingv1informers.PriorityClassInformer,
+	podLister listersv1.PodLister,
+	nodeLister listersv1.NodeLister,
+	namespaceLister listersv1.NamespaceLister,
+	priorityClassLister schedulingv1.PriorityClassLister,
 ) (clientset.Interface, error) {
 	fakeClient := fakeclientset.NewSimpleClientset()
 	// simulate a pod eviction by deleting a pod
@@ -118,7 +123,7 @@ func cachedClient(
 	})

 	klog.V(3).Infof("Pulling resources for the cached client from the cluster")
-	pods, err := podInformer.Lister().List(labels.Everything())
+	pods, err := podLister.List(labels.Everything())
 	if err != nil {
 		return nil, fmt.Errorf("unable to list pods: %v", err)
 	}
@@ -129,7 +134,7 @@ func cachedClient(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
nodes, err := nodeInformer.Lister().List(labels.Everything())
|
nodes, err := nodeLister.List(labels.Everything())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("unable to list nodes: %v", err)
|
return nil, fmt.Errorf("unable to list nodes: %v", err)
|
||||||
}
|
}
|
||||||
@@ -140,7 +145,7 @@ func cachedClient(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
namespaces, err := namespaceInformer.Lister().List(labels.Everything())
|
namespaces, err := namespaceLister.List(labels.Everything())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("unable to list namespaces: %v", err)
|
return nil, fmt.Errorf("unable to list namespaces: %v", err)
|
||||||
}
|
}
|
||||||
@@ -151,7 +156,7 @@ func cachedClient(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
priorityClasses, err := priorityClassInformer.Lister().List(labels.Everything())
|
priorityClasses, err := priorityClassLister.List(labels.Everything())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
|
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
|
||||||
}
|
}
|
||||||
@@ -165,20 +170,75 @@ func cachedClient(
 	return fakeClient, nil
 }
 
+// evictorImpl implements the Evictor interface so plugins
+// can evict a pod without importing a specific pod evictor
+type evictorImpl struct {
+	podEvictor    *evictions.PodEvictor
+	evictorFilter framework.EvictorPlugin
+}
+
+var _ framework.Evictor = &evictorImpl{}
+
+// Filter checks if a pod can be evicted
+func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
+	return ei.evictorFilter.Filter(pod)
+}
+
+// PreEvictionFilter checks if pod can be evicted right before eviction
+func (ei *evictorImpl) PreEvictionFilter(pod *v1.Pod) bool {
+	return ei.evictorFilter.PreEvictionFilter(pod)
+}
+
+// Evict evicts a pod (no pre-check performed)
+func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
+	return ei.podEvictor.EvictPod(ctx, pod, opts)
+}
+
+func (ei *evictorImpl) NodeLimitExceeded(node *v1.Node) bool {
+	return ei.podEvictor.NodeLimitExceeded(node)
+}
+
+// handleImpl implements the framework handle which gets passed to plugins
+type handleImpl struct {
+	clientSet                 clientset.Interface
+	getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
+	sharedInformerFactory     informers.SharedInformerFactory
+	evictor                   *evictorImpl
+}
+
+var _ framework.Handle = &handleImpl{}
+
+// ClientSet retrieves kube client set
+func (hi *handleImpl) ClientSet() clientset.Interface {
+	return hi.clientSet
+}
+
+// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
+func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
+	return hi.getPodsAssignedToNodeFunc
+}
+
+// SharedInformerFactory retrieves shared informer factory
+func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
+	return hi.sharedInformerFactory
+}
+
+// Evictor retrieves evictor so plugins can filter and evict pods
+func (hi *handleImpl) Evictor() framework.Evictor {
+	return hi.evictor
+}
+
 func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
 	sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
-	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
-	podInformer := sharedInformerFactory.Core().V1().Pods()
-	namespaceInformer := sharedInformerFactory.Core().V1().Namespaces()
-	priorityClassInformer := sharedInformerFactory.Scheduling().V1().PriorityClasses()
+	podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
+	podLister := sharedInformerFactory.Core().V1().Pods().Lister()
+	nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
+	namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
+	priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
 
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	// create the informers
-	namespaceInformer.Informer()
-	priorityClassInformer.Informer()
-
 	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
 	if err != nil {
 		return fmt.Errorf("build get pods assigned to node function error: %v", err)
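
The Evictor and Handle implementations added above are what strategy plugins receive after the framework migration. For orientation only, the sketch below shows how a hypothetical Deschedule plugin could consume them; the plugin name and logic are invented, and the framework.DeschedulePlugin and Status shapes are inferred from how this diff calls pg.Deschedule and status.Err, so treat every signature here as an assumption rather than the repository's actual API.

```go
// Hypothetical plugin, for illustration only; not part of the descheduler tree.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/pkg/framework"
)

type examplePlugin struct {
	handle framework.Handle // provided by handleImpl in the hunk above
}

func (p *examplePlugin) Name() string { return "ExamplePlugin" }

// Deschedule walks the nodes, skips nodes whose eviction limit is already
// reached, and asks the evictor (evictorImpl above) to filter and evict pods.
func (p *examplePlugin) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
	for _, node := range nodes {
		if p.handle.Evictor().NodeLimitExceeded(node) {
			continue
		}
		// GetPodsAssignedToNodeFunc comes from podutil.BuildGetPodsAssignedToNodeFunc;
		// a nil filter is assumed here to mean "all pods on the node".
		pods, err := p.handle.GetPodsAssignedToNodeFunc()(node.Name, nil)
		if err != nil {
			return &framework.Status{Err: err}
		}
		for _, pod := range pods {
			if p.handle.Evictor().Filter(pod) && p.handle.Evictor().PreEvictionFilter(pod) {
				p.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{Reason: "example"})
			}
		}
	}
	return nil
}
```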
@@ -187,52 +247,23 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 	sharedInformerFactory.Start(ctx.Done())
 	sharedInformerFactory.WaitForCacheSync(ctx.Done())
 
-	strategyFuncs := map[api.StrategyName]strategyFunction{
-		"RemoveDuplicates":                strategies.RemoveDuplicatePods,
-		"LowNodeUtilization":              nodeutilization.LowNodeUtilization,
-		"HighNodeUtilization":             nodeutilization.HighNodeUtilization,
-		"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
-		"RemovePodsViolatingNodeAffinity": strategies.RemovePodsViolatingNodeAffinity,
-		"RemovePodsViolatingNodeTaints":   strategies.RemovePodsViolatingNodeTaints,
-		"RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
-		"PodLifeTime":                     strategies.PodLifeTime,
-		"RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint,
-		"RemoveFailedPods":                strategies.RemoveFailedPods,
-	}
-
 	var nodeSelector string
 	if deschedulerPolicy.NodeSelector != nil {
 		nodeSelector = *deschedulerPolicy.NodeSelector
 	}
 
-	var evictLocalStoragePods bool
-	if deschedulerPolicy.EvictLocalStoragePods != nil {
-		evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
+	var eventClient clientset.Interface
+	if rs.DryRun {
+		eventClient = fakeclientset.NewSimpleClientset()
+	} else {
+		eventClient = rs.Client
 	}
 
-	evictBarePods := false
-	if deschedulerPolicy.EvictFailedBarePods != nil {
-		evictBarePods = *deschedulerPolicy.EvictFailedBarePods
-		if evictBarePods {
-			klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
-		}
-	}
-
-	evictSystemCriticalPods := false
-	if deschedulerPolicy.EvictSystemCriticalPods != nil {
-		evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
-		if evictSystemCriticalPods {
-			klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
-		}
-	}
-
-	ignorePvcPods := false
-	if deschedulerPolicy.IgnorePVCPods != nil {
-		ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
-	}
+	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
+	defer eventBroadcaster.Shutdown()
 
 	wait.NonSlidingUntil(func() {
-		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
+		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeLister, nodeSelector)
 		if err != nil {
 			klog.V(1).InfoS("Unable to get ready nodes", "err", err)
 			cancel()
@@ -252,14 +283,14 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 		if rs.DryRun {
 			klog.V(3).Infof("Building a cached client from the cluster for the dry run")
 			// Create a new cache so we start from scratch without any leftovers
-			fakeClient, err := cachedClient(rs.Client, podInformer, nodeInformer, namespaceInformer, priorityClassInformer)
+			fakeClient, err := cachedClient(rs.Client, podLister, nodeLister, namespaceLister, priorityClassLister)
 			if err != nil {
 				klog.Error(err)
 				return
 			}
 
 			fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-			getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods())
+			getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
 			if err != nil {
 				klog.Errorf("build get pods assigned to node function error: %v", err)
 				return
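
The dry-run branch above seeds an in-memory fake clientset from the real cluster through listers, so evictions never reach the API server. The following is a generic client-go sketch of that pattern, independent of the descheduler helpers; the function name buildCachedClient is made up for this illustration.

```go
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
)

// buildCachedClient copies every pod known to an already-synced informer
// factory into a fake clientset, so later deletions or evictions only touch
// the in-memory copy.
func buildCachedClient(factory informers.SharedInformerFactory) (clientset.Interface, error) {
	fakeClient := fakeclientset.NewSimpleClientset()

	pods, err := factory.Core().V1().Pods().Lister().List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("unable to list pods: %v", err)
	}
	for _, pod := range pods {
		// Tracker().Add stores the object in the fake client's object tracker.
		if err := fakeClient.Tracker().Add(pod); err != nil {
			return nil, err
		}
	}
	return fakeClient, nil
}
```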
@@ -283,21 +314,94 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 			deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
 			deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
 			nodes,
-			getPodsAssignedToNode,
-			evictLocalStoragePods,
-			evictSystemCriticalPods,
-			ignorePvcPods,
-			evictBarePods,
 			!rs.DisableMetrics,
+			eventRecorder,
 		)
 
-		for name, strategy := range deschedulerPolicy.Strategies {
-			if f, ok := strategyFuncs[name]; ok {
-				if strategy.Enabled {
-					f(ctx, rs.Client, strategy, nodes, podEvictor, getPodsAssignedToNode)
-				}
-			} else {
-				klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
-			}
-		}
+		var enabledDeschedulePlugins []framework.DeschedulePlugin
+		var enabledBalancePlugins []framework.BalancePlugin
+
+		// Build plugins
+		for _, profile := range deschedulerPolicy.Profiles {
+			pc := getPluginConfig(defaultevictor.PluginName, profile.PluginConfigs)
+			if pc == nil {
+				klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", defaultevictor.PluginName, "profile", profile.Name)
+				continue
+			}
+			evictorFilter, err := defaultevictor.New(
+				pc.Args,
+				&handleImpl{
+					clientSet:                 rs.Client,
+					getPodsAssignedToNodeFunc: getPodsAssignedToNode,
+					sharedInformerFactory:     sharedInformerFactory,
+				},
+			)
+			if err != nil {
+				klog.ErrorS(fmt.Errorf("unable to construct a plugin"), "skipping plugin", "plugin", defaultevictor.PluginName)
+				continue
+			}
+			handle := &handleImpl{
+				clientSet:                 rs.Client,
+				getPodsAssignedToNodeFunc: getPodsAssignedToNode,
+				sharedInformerFactory:     sharedInformerFactory,
+				evictor: &evictorImpl{
+					podEvictor:    podEvictor,
+					evictorFilter: evictorFilter.(framework.EvictorPlugin),
+				},
+			}
+			// Assuming only a list of enabled extension points.
+			// Later, when a default list of plugins and their extension points is established,
+			// compute the list of enabled extension points as (DefaultEnabled + Enabled - Disabled)
+			for _, plugin := range append(profile.Plugins.Deschedule.Enabled, profile.Plugins.Balance.Enabled...) {
+				pc := getPluginConfig(plugin, profile.PluginConfigs)
+				if pc == nil {
+					klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", plugin)
+					continue
+				}
+				registryPlugin, ok := pluginbuilder.PluginRegistry[plugin]
+				pgFnc := registryPlugin.PluginBuilder
+				if !ok {
+					klog.ErrorS(fmt.Errorf("unable to find plugin in the pluginsMap"), "skipping plugin", "plugin", plugin)
+				}
+				pg, err := pgFnc(pc.Args, handle)
+				if err != nil {
+					klog.ErrorS(err, "unable to initialize a plugin", "pluginName", plugin)
+				}
+				if pg != nil {
+					switch v := pg.(type) {
+					case framework.DeschedulePlugin:
+						enabledDeschedulePlugins = append(enabledDeschedulePlugins, v)
+					case framework.BalancePlugin:
+						enabledBalancePlugins = append(enabledBalancePlugins, v)
+					default:
+						klog.ErrorS(fmt.Errorf("unknown plugin extension point"), "skipping plugin", "plugin", plugin)
+					}
+				}
+			}
+		}
+
+		// Execute extension points
+		for _, pg := range enabledDeschedulePlugins {
+			// TODO: strategyName should be accessible from within the strategy using a framework
+			// handle or function which the Evictor has access to. For migration/in-progress framework
+			// work, we are currently passing this via context. To be removed
+			// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
+			childCtx := context.WithValue(ctx, "strategyName", pg.Name())
+			status := pg.Deschedule(childCtx, nodes)
+			if status != nil && status.Err != nil {
+				klog.ErrorS(status.Err, "plugin finished with error", "pluginName", pg.Name())
+			}
+		}
+
+		for _, pg := range enabledBalancePlugins {
+			// TODO: strategyName should be accessible from within the strategy using a framework
+			// handle or function which the Evictor has access to. For migration/in-progress framework
+			// work, we are currently passing this via context. To be removed
+			// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
+			childCtx := context.WithValue(ctx, "strategyName", pg.Name())
+			status := pg.Balance(childCtx, nodes)
+			if status != nil && status.Err != nil {
+				klog.ErrorS(status.Err, "plugin finished with error", "pluginName", pg.Name())
+			}
+		}
 
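
The lookup `pluginbuilder.PluginRegistry[plugin]` above only succeeds for plugins that were registered beforehand. A minimal sketch of that registration step, built from the same calls that appear in the test changes later in this diff (the helper function name is made up):

```go
package example

import (
	"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
)

// registerExamplePlugins populates the global registry consulted by
// RunDeschedulerStrategies. Each plugin is registered together with the args
// type its New() constructor expects.
func registerExamplePlugins() {
	pluginbuilder.PluginRegistry = pluginbuilder.NewRegistry()
	pluginbuilder.Register(
		removeduplicates.PluginName,
		removeduplicates.New,
		&removeduplicates.RemoveDuplicatesArgs{},
		pluginbuilder.PluginRegistry,
	)
	pluginbuilder.Register(
		removepodsviolatingnodetaints.PluginName,
		removepodsviolatingnodetaints.New,
		&removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{},
		pluginbuilder.PluginRegistry,
	)
}
```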
@@ -311,3 +415,26 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 
 	return nil
 }
+
+func getPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) *api.PluginConfig {
+	for _, pluginConfig := range pluginConfigs {
+		if pluginConfig.Name == pluginName {
+			return &pluginConfig
+		}
+	}
+	return nil
+}
+
+func createClients(kubeconfig string) (clientset.Interface, clientset.Interface, error) {
+	kClient, err := client.CreateClient(kubeconfig, "descheduler")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	eventClient, err := client.CreateClient(kubeconfig, "")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return kClient, eventClient, nil
+}
@@ -3,20 +3,27 @@ package descheduler
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
|
policy "k8s.io/api/policy/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||||
|
core "k8s.io/client-go/testing"
|
||||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
|
||||||
"sigs.k8s.io/descheduler/test"
|
"sigs.k8s.io/descheduler/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestTaintsUpdated(t *testing.T) {
|
func TestTaintsUpdated(t *testing.T) {
|
||||||
|
pluginbuilder.PluginRegistry = pluginbuilder.NewRegistry()
|
||||||
|
pluginbuilder.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, pluginbuilder.PluginRegistry)
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||||
@@ -27,9 +34,10 @@ func TestTaintsUpdated(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
||||||
dp := &api.DeschedulerPolicy{
|
eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
||||||
Strategies: api.StrategyList{
|
dp := &v1alpha1.DeschedulerPolicy{
|
||||||
"RemovePodsViolatingNodeTaints": api.DeschedulerStrategy{
|
Strategies: v1alpha1.StrategyList{
|
||||||
|
"RemovePodsViolatingNodeTaints": v1alpha1.DeschedulerStrategy{
|
||||||
Enabled: true,
|
Enabled: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -40,21 +48,7 @@ func TestTaintsUpdated(t *testing.T) {
|
|||||||
t.Fatalf("Unable to initialize server: %v", err)
|
t.Fatalf("Unable to initialize server: %v", err)
|
||||||
}
|
}
|
||||||
rs.Client = client
|
rs.Client = client
|
||||||
rs.DeschedulingInterval = 100 * time.Millisecond
|
rs.EventClient = eventClient
|
||||||
errChan := make(chan error, 1)
|
|
||||||
defer close(errChan)
|
|
||||||
go func() {
|
|
||||||
err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1")
|
|
||||||
errChan <- err
|
|
||||||
}()
|
|
||||||
select {
|
|
||||||
case err := <-errChan:
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to run descheduler strategies: %v", err)
|
|
||||||
}
|
|
||||||
case <-time.After(1 * time.Second):
|
|
||||||
// Wait for few cycles and then verify the only pod still exists
|
|
||||||
}
|
|
||||||
|
|
||||||
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
|
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -77,25 +71,81 @@ func TestTaintsUpdated(t *testing.T) {
|
|||||||
t.Fatalf("Unable to update node: %v\n", err)
|
t.Fatalf("Unable to update node: %v\n", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := wait.PollImmediate(100*time.Millisecond, time.Second, func() (bool, error) {
|
var evictedPods []string
|
||||||
// Get over evicted pod result in panic
|
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
|
||||||
//pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
|
|
||||||
// List is better, it does not panic.
|
|
||||||
// Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
|
|
||||||
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
|
|
||||||
if err == nil {
|
|
||||||
if len(pods.Items) > 0 {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
if strings.Contains(err.Error(), "can't assign or convert v1beta1.Eviction into v1.Pod") {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, nil
|
internalDeschedulerPolicy, err := V1alpha1ToInternal(client, dp, pluginbuilder.PluginRegistry)
|
||||||
}); err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies")
|
t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
|
||||||
|
t.Fatalf("Unable to run descheduler strategies: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(evictedPods) != 1 {
|
||||||
|
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDuplicate(t *testing.T) {
|
||||||
|
pluginbuilder.PluginRegistry = pluginbuilder.NewRegistry()
|
||||||
|
pluginbuilder.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicatesArgs{}, pluginbuilder.PluginRegistry)
|
||||||
|
ctx := context.Background()
|
||||||
|
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||||
|
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||||
|
|
||||||
|
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||||
|
p1.Namespace = "dev"
|
||||||
|
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||||
|
p2.Namespace = "dev"
|
||||||
|
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
|
||||||
|
p3.Namespace = "dev"
|
||||||
|
|
||||||
|
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||||
|
p1.ObjectMeta.OwnerReferences = ownerRef1
|
||||||
|
p2.ObjectMeta.OwnerReferences = ownerRef1
|
||||||
|
p3.ObjectMeta.OwnerReferences = ownerRef1
|
||||||
|
|
||||||
|
client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
|
||||||
|
eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
|
||||||
|
dp := &v1alpha1.DeschedulerPolicy{
|
||||||
|
Strategies: v1alpha1.StrategyList{
|
||||||
|
"RemoveDuplicates": v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
rs, err := options.NewDeschedulerServer()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to initialize server: %v", err)
|
||||||
|
}
|
||||||
|
rs.Client = client
|
||||||
|
rs.EventClient = eventClient
|
||||||
|
|
||||||
|
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Unable to list pods: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(pods.Items) != 3 {
|
||||||
|
t.Errorf("Pods number should be 3 before evict")
|
||||||
|
}
|
||||||
|
|
||||||
|
var evictedPods []string
|
||||||
|
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
|
||||||
|
|
||||||
|
internalDeschedulerPolicy, err := V1alpha1ToInternal(client, dp, pluginbuilder.PluginRegistry)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
|
||||||
|
}
|
||||||
|
if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
|
||||||
|
t.Fatalf("Unable to run descheduler strategies: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(evictedPods) == 0 {
|
||||||
|
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -104,8 +154,9 @@ func TestRootCancel(t *testing.T) {
|
|||||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||||
client := fakeclientset.NewSimpleClientset(n1, n2)
|
client := fakeclientset.NewSimpleClientset(n1, n2)
|
||||||
|
eventClient := fakeclientset.NewSimpleClientset(n1, n2)
|
||||||
dp := &api.DeschedulerPolicy{
|
dp := &api.DeschedulerPolicy{
|
||||||
Strategies: api.StrategyList{}, // no strategies needed for this test
|
Profiles: []api.Profile{}, // no strategies needed for this test
|
||||||
}
|
}
|
||||||
|
|
||||||
rs, err := options.NewDeschedulerServer()
|
rs, err := options.NewDeschedulerServer()
|
||||||
@@ -113,12 +164,13 @@ func TestRootCancel(t *testing.T) {
|
|||||||
t.Fatalf("Unable to initialize server: %v", err)
|
t.Fatalf("Unable to initialize server: %v", err)
|
||||||
}
|
}
|
||||||
rs.Client = client
|
rs.Client = client
|
||||||
|
rs.EventClient = eventClient
|
||||||
rs.DeschedulingInterval = 100 * time.Millisecond
|
rs.DeschedulingInterval = 100 * time.Millisecond
|
||||||
errChan := make(chan error, 1)
|
errChan := make(chan error, 1)
|
||||||
defer close(errChan)
|
defer close(errChan)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1")
|
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
|
||||||
errChan <- err
|
errChan <- err
|
||||||
}()
|
}()
|
||||||
cancel()
|
cancel()
|
||||||
@@ -137,8 +189,9 @@ func TestRootCancelWithNoInterval(t *testing.T) {
|
|||||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||||
client := fakeclientset.NewSimpleClientset(n1, n2)
|
client := fakeclientset.NewSimpleClientset(n1, n2)
|
||||||
|
eventClient := fakeclientset.NewSimpleClientset(n1, n2)
|
||||||
dp := &api.DeschedulerPolicy{
|
dp := &api.DeschedulerPolicy{
|
||||||
Strategies: api.StrategyList{}, // no strategies needed for this test
|
Profiles: []api.Profile{}, // no strategies needed for this test
|
||||||
}
|
}
|
||||||
|
|
||||||
rs, err := options.NewDeschedulerServer()
|
rs, err := options.NewDeschedulerServer()
|
||||||
@@ -146,12 +199,13 @@ func TestRootCancelWithNoInterval(t *testing.T) {
|
|||||||
t.Fatalf("Unable to initialize server: %v", err)
|
t.Fatalf("Unable to initialize server: %v", err)
|
||||||
}
|
}
|
||||||
rs.Client = client
|
rs.Client = client
|
||||||
|
rs.EventClient = eventClient
|
||||||
rs.DeschedulingInterval = 0
|
rs.DeschedulingInterval = 0
|
||||||
errChan := make(chan error, 1)
|
errChan := make(chan error, 1)
|
||||||
defer close(errChan)
|
defer close(errChan)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1")
|
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
|
||||||
errChan <- err
|
errChan <- err
|
||||||
}()
|
}()
|
||||||
cancel()
|
cancel()
|
||||||
@@ -164,3 +218,18 @@ func TestRootCancelWithNoInterval(t *testing.T) {
|
|||||||
t.Fatal("Root ctx should have canceled immediately")
|
t.Fatal("Root ctx should have canceled immediately")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
|
||||||
|
return func(action core.Action) (bool, runtime.Object, error) {
|
||||||
|
if action.GetSubresource() == "eviction" {
|
||||||
|
createAct, matched := action.(core.CreateActionImpl)
|
||||||
|
if !matched {
|
||||||
|
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
|
||||||
|
}
|
||||||
|
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
|
||||||
|
*evictedPods = append(*evictedPods, eviction.GetName())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, nil, nil // fallback to the default reactor
|
||||||
|
}
|
||||||
|
}
|
||||||
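
The podEvictionReactionFuc reactor added above records which pods were evicted during the tests. A self-contained sketch of the same fake-clientset pattern, independent of the descheduler test helpers (function and object names are invented), looks roughly like this:

```go
package example

import (
	"context"
	"fmt"

	policy "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func demoEvictionReactor() error {
	client := fakeclientset.NewSimpleClientset()
	var evictedPods []string

	// Intercept "create pods/eviction" calls and remember the pod name instead
	// of letting the fake clientset try to store an Eviction as a Pod.
	client.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() != "eviction" {
			return false, nil, nil
		}
		createAct, ok := action.(core.CreateActionImpl)
		if !ok {
			return false, nil, fmt.Errorf("unexpected action type %T", action)
		}
		if eviction, ok := createAct.Object.(*policy.Eviction); ok {
			evictedPods = append(evictedPods, eviction.GetName())
		}
		return true, nil, nil
	})

	eviction := &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	if err := client.PolicyV1().Evictions("default").Evict(context.TODO(), eviction); err != nil {
		return err
	}
	fmt.Println("evicted:", evictedPods) // evicted: [p1]
	return nil
}
```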
|
|||||||
@@ -19,50 +19,36 @@ package evictions
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
policy "k8s.io/api/policy/v1beta1"
|
policy "k8s.io/api/policy/v1"
|
||||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
|
||||||
"k8s.io/apimachinery/pkg/util/errors"
|
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/client-go/kubernetes/scheme"
|
"k8s.io/client-go/tools/events"
|
||||||
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
|
||||||
"k8s.io/client-go/tools/record"
|
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
"sigs.k8s.io/descheduler/metrics"
|
"sigs.k8s.io/descheduler/metrics"
|
||||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/utils"
|
|
||||||
|
|
||||||
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
|
|
||||||
)
|
|
||||||
|
|
||||||
// nodePodEvictedCount keeps count of pods evicted on node
|
// nodePodEvictedCount keeps count of pods evicted on node
|
||||||
type nodePodEvictedCount map[*v1.Node]uint
|
type (
|
||||||
type namespacePodEvictCount map[string]uint
|
nodePodEvictedCount map[string]uint
|
||||||
|
namespacePodEvictCount map[string]uint
|
||||||
|
)
|
||||||
|
|
||||||
type PodEvictor struct {
|
type PodEvictor struct {
|
||||||
client clientset.Interface
|
client clientset.Interface
|
||||||
nodes []*v1.Node
|
nodes []*v1.Node
|
||||||
nodeIndexer podutil.GetPodsAssignedToNodeFunc
|
|
||||||
policyGroupVersion string
|
policyGroupVersion string
|
||||||
dryRun bool
|
dryRun bool
|
||||||
maxPodsToEvictPerNode *uint
|
maxPodsToEvictPerNode *uint
|
||||||
maxPodsToEvictPerNamespace *uint
|
maxPodsToEvictPerNamespace *uint
|
||||||
nodepodCount nodePodEvictedCount
|
nodepodCount nodePodEvictedCount
|
||||||
namespacePodCount namespacePodEvictCount
|
namespacePodCount namespacePodEvictCount
|
||||||
evictFailedBarePods bool
|
|
||||||
evictLocalStoragePods bool
|
|
||||||
evictSystemCriticalPods bool
|
|
||||||
ignorePvcPods bool
|
|
||||||
metricsEnabled bool
|
metricsEnabled bool
|
||||||
|
eventRecorder events.EventRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewPodEvictor(
|
func NewPodEvictor(
|
||||||
@@ -72,41 +58,33 @@ func NewPodEvictor(
|
|||||||
maxPodsToEvictPerNode *uint,
|
maxPodsToEvictPerNode *uint,
|
||||||
maxPodsToEvictPerNamespace *uint,
|
maxPodsToEvictPerNamespace *uint,
|
||||||
nodes []*v1.Node,
|
nodes []*v1.Node,
|
||||||
nodeIndexer podutil.GetPodsAssignedToNodeFunc,
|
|
||||||
evictLocalStoragePods bool,
|
|
||||||
evictSystemCriticalPods bool,
|
|
||||||
ignorePvcPods bool,
|
|
||||||
evictFailedBarePods bool,
|
|
||||||
metricsEnabled bool,
|
metricsEnabled bool,
|
||||||
|
eventRecorder events.EventRecorder,
|
||||||
) *PodEvictor {
|
) *PodEvictor {
|
||||||
var nodePodCount = make(nodePodEvictedCount)
|
nodePodCount := make(nodePodEvictedCount)
|
||||||
var namespacePodCount = make(namespacePodEvictCount)
|
namespacePodCount := make(namespacePodEvictCount)
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
// Initialize podsEvicted till now with 0.
|
// Initialize podsEvicted till now with 0.
|
||||||
nodePodCount[node] = 0
|
nodePodCount[node.Name] = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
return &PodEvictor{
|
return &PodEvictor{
|
||||||
client: client,
|
client: client,
|
||||||
nodes: nodes,
|
nodes: nodes,
|
||||||
nodeIndexer: nodeIndexer,
|
|
||||||
policyGroupVersion: policyGroupVersion,
|
policyGroupVersion: policyGroupVersion,
|
||||||
dryRun: dryRun,
|
dryRun: dryRun,
|
||||||
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
|
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
|
||||||
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
|
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
|
||||||
nodepodCount: nodePodCount,
|
nodepodCount: nodePodCount,
|
||||||
namespacePodCount: namespacePodCount,
|
namespacePodCount: namespacePodCount,
|
||||||
evictLocalStoragePods: evictLocalStoragePods,
|
|
||||||
evictSystemCriticalPods: evictSystemCriticalPods,
|
|
||||||
evictFailedBarePods: evictFailedBarePods,
|
|
||||||
ignorePvcPods: ignorePvcPods,
|
|
||||||
metricsEnabled: metricsEnabled,
|
metricsEnabled: metricsEnabled,
|
||||||
|
eventRecorder: eventRecorder,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NodeEvicted gives a number of pods evicted for node
|
// NodeEvicted gives a number of pods evicted for node
|
||||||
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
|
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
|
||||||
return pe.nodepodCount[node]
|
return pe.nodepodCount[node.Name]
|
||||||
}
|
}
|
||||||
|
|
||||||
// TotalEvicted gives a number of pods evicted through all nodes
|
// TotalEvicted gives a number of pods evicted through all nodes
|
||||||
@@ -118,56 +96,80 @@ func (pe *PodEvictor) TotalEvicted() uint {
|
|||||||
return total
|
return total
|
||||||
}
|
}
|
||||||
|
|
||||||
// EvictPod returns non-nil error only when evicting a pod on a node is not
|
// NodeLimitExceeded checks if the number of evictions for a node was exceeded
|
||||||
// possible (due to maxPodsToEvictPerNode constraint). Success is true when the pod
|
func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
|
||||||
// is evicted on the server side.
|
if pe.maxPodsToEvictPerNode != nil {
|
||||||
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, strategy string, reasons ...string) (bool, error) {
|
return pe.nodepodCount[node.Name] == *pe.maxPodsToEvictPerNode
|
||||||
reason := strategy
|
|
||||||
if len(reasons) > 0 {
|
|
||||||
reason += " (" + strings.Join(reasons, ", ") + ")"
|
|
||||||
}
|
}
|
||||||
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[node]+1 > *pe.maxPodsToEvictPerNode {
|
return false
|
||||||
if pe.metricsEnabled {
|
}
|
||||||
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
|
|
||||||
|
// EvictOptions provides a handle for passing additional info to EvictPod
|
||||||
|
type EvictOptions struct {
|
||||||
|
// Reason allows for passing details about the specific eviction for logging.
|
||||||
|
Reason string
|
||||||
|
}
|
||||||
|
|
||||||
|
// EvictPod evicts a pod while exercising eviction limits.
|
||||||
|
// Returns true when the pod is evicted on the server side.
|
||||||
|
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
|
||||||
|
// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
|
||||||
|
strategy := ""
|
||||||
|
if ctx.Value("strategyName") != nil {
|
||||||
|
strategy = ctx.Value("strategyName").(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if pod.Spec.NodeName != "" {
|
||||||
|
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
|
||||||
|
if pe.metricsEnabled {
|
||||||
|
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||||
|
}
|
||||||
|
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per node reached"), "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", *pe.maxPodsToEvictPerNode, node.Name)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
|
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
|
||||||
if pe.metricsEnabled {
|
if pe.metricsEnabled {
|
||||||
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
|
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||||
}
|
}
|
||||||
return false, fmt.Errorf("Maximum number %v of evicted pods per %q namespace reached", *pe.maxPodsToEvictPerNamespace, pod.Namespace)
|
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per namespace reached"), "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
|
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// err is used only for logging purposes
|
// err is used only for logging purposes
|
||||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
|
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
|
||||||
if pe.metricsEnabled {
|
if pe.metricsEnabled {
|
||||||
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
|
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||||
}
|
}
|
||||||
return false, nil
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
pe.nodepodCount[node]++
|
if pod.Spec.NodeName != "" {
|
||||||
|
pe.nodepodCount[pod.Spec.NodeName]++
|
||||||
|
}
|
||||||
pe.namespacePodCount[pod.Namespace]++
|
pe.namespacePodCount[pod.Namespace]++
|
||||||
|
|
||||||
if pe.metricsEnabled {
|
if pe.metricsEnabled {
|
||||||
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
|
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||||
}
|
}
|
||||||
|
|
||||||
if pe.dryRun {
|
if pe.dryRun {
|
||||||
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", node.Name)
|
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
|
||||||
} else {
|
} else {
|
||||||
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", node.Name)
|
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
|
||||||
eventBroadcaster := record.NewBroadcaster()
|
reason := opts.Reason
|
||||||
eventBroadcaster.StartStructuredLogging(3)
|
if len(reason) == 0 {
|
||||||
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
|
reason = strategy
|
||||||
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
|
if len(reason) == 0 {
|
||||||
r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
|
reason = "NotSet"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
|
||||||
}
|
}
|
||||||
return true, nil
|
return true
|
||||||
}
|
}
|
||||||
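
With the signature introduced above, EvictPod no longer takes a node or a strategy name and returns a plain bool. A minimal sketch of how a caller would use it, assuming only what is visible in this hunk (EvictOptions, the "strategyName" context value, the node read from pod.Spec.NodeName); the wrapper function is hypothetical:

```go
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

// evictWithReason shows, schematically, how a strategy calls the reworked
// EvictPod. The "strategyName" context value mirrors the temporary mechanism
// used by RunDeschedulerStrategies earlier in this diff and feeds the
// strategy label on the eviction metric.
func evictWithReason(ctx context.Context, podEvictor *evictions.PodEvictor, pod *v1.Pod) {
	ctx = context.WithValue(ctx, "strategyName", "ExampleStrategy")
	if podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{Reason: "example reason"}) {
		klog.V(1).InfoS("eviction requested", "pod", klog.KObj(pod))
	}
}
```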
|
|
||||||
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
|
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
|
||||||
@@ -184,7 +186,7 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
|
|||||||
},
|
},
|
||||||
DeleteOptions: deleteOptions,
|
DeleteOptions: deleteOptions,
|
||||||
}
|
}
|
||||||
err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction)
|
err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
|
||||||
|
|
||||||
if apierrors.IsTooManyRequests(err) {
|
if apierrors.IsTooManyRequests(err) {
|
||||||
return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||||
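
The hunk above switches the eviction call from the deprecated policy/v1beta1 API to policy/v1. A generic client-go sketch of the request that is now issued (not descheduler code; the helper name is made up):

```go
package example

import (
	"context"

	policy "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// evictViaPolicyV1 posts a policy/v1 Eviction to the pod's eviction
// subresource, which is what client.PolicyV1().Evictions(...).Evict does.
func evictViaPolicyV1(ctx context.Context, client clientset.Interface, namespace, name string) error {
	eviction := &policy.Eviction{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
	}
	return client.PolicyV1().Evictions(namespace).Evict(ctx, eviction)
}
```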
@@ -194,171 +196,3 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
type Options struct {
|
|
||||||
priority *int32
|
|
||||||
nodeFit bool
|
|
||||||
labelSelector labels.Selector
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithPriorityThreshold sets a threshold for pod's priority class.
|
|
||||||
// Any pod whose priority class is lower is evictable.
|
|
||||||
func WithPriorityThreshold(priority int32) func(opts *Options) {
|
|
||||||
return func(opts *Options) {
|
|
||||||
var p int32 = priority
|
|
||||||
opts.priority = &p
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithNodeFit sets whether or not to consider taints, node selectors,
|
|
||||||
// and pod affinity when evicting. A pod whose tolerations, node selectors,
|
|
||||||
// and affinity match a node other than the one it is currently running on
|
|
||||||
// is evictable.
|
|
||||||
func WithNodeFit(nodeFit bool) func(opts *Options) {
|
|
||||||
return func(opts *Options) {
|
|
||||||
opts.nodeFit = nodeFit
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLabelSelector sets whether or not to apply label filtering when evicting.
|
|
||||||
// Any pod matching the label selector is considered evictable.
|
|
||||||
func WithLabelSelector(labelSelector labels.Selector) func(opts *Options) {
|
|
||||||
return func(opts *Options) {
|
|
||||||
opts.labelSelector = labelSelector
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type constraint func(pod *v1.Pod) error
|
|
||||||
|
|
||||||
type evictable struct {
|
|
||||||
constraints []constraint
|
|
||||||
}
|
|
||||||
|
|
||||||
// Evictable provides an implementation of IsEvictable(IsEvictable(pod *v1.Pod) bool).
|
|
||||||
// The method accepts a list of options which allow to extend constraints
|
|
||||||
// which decides when a pod is considered evictable.
|
|
||||||
func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
|
|
||||||
options := &Options{}
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt(options)
|
|
||||||
}
|
|
||||||
|
|
||||||
ev := &evictable{}
|
|
||||||
if pe.evictFailedBarePods {
|
|
||||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
|
||||||
ownerRefList := podutil.OwnerRef(pod)
|
|
||||||
// Enable evictFailedBarePods to evict bare pods in failed phase
|
|
||||||
if len(ownerRefList) == 0 && pod.Status.Phase != v1.PodFailed {
|
|
||||||
return fmt.Errorf("pod does not have any ownerRefs and is not in failed phase")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
|
||||||
ownerRefList := podutil.OwnerRef(pod)
|
|
||||||
// Moved from IsEvictable function for backward compatibility
|
|
||||||
if len(ownerRefList) == 0 {
|
|
||||||
return fmt.Errorf("pod does not have any ownerRefs")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if !pe.evictSystemCriticalPods {
|
|
||||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
|
||||||
// Moved from IsEvictable function to allow for disabling
|
|
||||||
if utils.IsCriticalPriorityPod(pod) {
|
|
||||||
return fmt.Errorf("pod has system critical priority")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
if options.priority != nil {
|
|
||||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
|
||||||
if IsPodEvictableBasedOnPriority(pod, *options.priority) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("pod has higher priority than specified priority class threshold")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !pe.evictLocalStoragePods {
|
|
||||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
|
||||||
if utils.IsPodWithLocalStorage(pod) {
|
|
||||||
return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if pe.ignorePvcPods {
|
|
||||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
|
||||||
if utils.IsPodWithPVC(pod) {
|
|
||||||
return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if options.nodeFit {
|
|
||||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
|
||||||
if !nodeutil.PodFitsAnyOtherNode(pe.nodeIndexer, pod, pe.nodes) {
|
|
||||||
return fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if options.labelSelector != nil && !options.labelSelector.Empty() {
|
|
||||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
|
||||||
if !options.labelSelector.Matches(labels.Set(pod.Labels)) {
|
|
||||||
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return ev
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsEvictable decides when a pod is evictable
|
|
||||||
func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
|
|
||||||
checkErrs := []error{}
|
|
||||||
|
|
||||||
ownerRefList := podutil.OwnerRef(pod)
|
|
||||||
if utils.IsDaemonsetPod(ownerRefList) {
|
|
||||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
|
|
||||||
}
|
|
||||||
|
|
||||||
if utils.IsMirrorPod(pod) {
|
|
||||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
|
|
||||||
}
|
|
||||||
|
|
||||||
if utils.IsStaticPod(pod) {
|
|
||||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a static pod"))
|
|
||||||
}
|
|
||||||
|
|
||||||
if utils.IsPodTerminating(pod) {
|
|
||||||
checkErrs = append(checkErrs, fmt.Errorf("pod is terminating"))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, c := range ev.constraints {
|
|
||||||
if err := c(pod); err != nil {
|
|
||||||
checkErrs = append(checkErrs, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(checkErrs) > 0 && !HaveEvictAnnotation(pod) {
|
|
||||||
klog.V(4).InfoS("Pod lacks an eviction annotation and fails the following checks", "pod", klog.KObj(pod), "checks", errors.NewAggregate(checkErrs).Error())
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// HaveEvictAnnotation checks if the pod have evict annotation
|
|
||||||
func HaveEvictAnnotation(pod *v1.Pod) bool {
|
|
||||||
_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
|
|
||||||
return found
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsPodEvictableBasedOnPriority checks if the given pod is evictable based on priority resolved from pod Spec.
|
|
||||||
func IsPodEvictableBasedOnPriority(pod *v1.Pod, priority int32) bool {
|
|
||||||
return pod.Spec.Priority == nil || *pod.Spec.Priority < priority
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -21,10 +21,8 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
policyv1 "k8s.io/api/policy/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/client-go/informers"
|
|
||||||
"k8s.io/client-go/kubernetes/fake"
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
core "k8s.io/client-go/testing"
|
core "k8s.io/client-go/testing"
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
@@ -71,603 +69,6 @@ func TestEvictPod(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsEvictable(t *testing.T) {
|
|
||||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
|
||||||
lowPriority := int32(800)
|
|
||||||
highPriority := int32(900)
|
|
||||||
|
|
||||||
nodeTaintKey := "hardware"
|
|
||||||
nodeTaintValue := "gpu"
|
|
||||||
|
|
||||||
nodeLabelKey := "datacenter"
|
|
||||||
nodeLabelValue := "east"
|
|
||||||
type testCase struct {
|
|
||||||
description string
|
|
||||||
pods []*v1.Pod
|
|
||||||
nodes []*v1.Node
|
|
||||||
evictFailedBarePods bool
|
|
||||||
evictLocalStoragePods bool
|
|
||||||
evictSystemCriticalPods bool
|
|
||||||
priorityThreshold *int32
|
|
||||||
nodeFit bool
|
|
||||||
result bool
|
|
||||||
}
|
|
||||||
|
|
||||||
testCases := []testCase{
|
|
||||||
{
|
|
||||||
description: "Failed pod eviction with no ownerRefs",
|
|
||||||
pods: []*v1.Pod{
|
|
||||||
test.BuildTestPod("bare_pod_failed", 400, 0, n1.Name, func(pod *v1.Pod) {
|
|
||||||
pod.Status.Phase = v1.PodFailed
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
evictFailedBarePods: false,
|
|
||||||
result: false,
|
|
||||||
}, {
|
|
||||||
description: "Normal pod eviction with no ownerRefs and evictFailedBarePods enabled",
|
|
||||||
pods: []*v1.Pod{test.BuildTestPod("bare_pod", 400, 0, n1.Name, nil)},
|
|
||||||
evictFailedBarePods: true,
|
|
||||||
result: false,
|
|
||||||
}, {
|
|
||||||
description: "Failed pod eviction with no ownerRefs",
|
|
||||||
pods: []*v1.Pod{
|
|
||||||
test.BuildTestPod("bare_pod_failed_but_can_be_evicted", 400, 0, n1.Name, func(pod *v1.Pod) {
|
|
||||||
pod.Status.Phase = v1.PodFailed
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
evictFailedBarePods: true,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
description: "Normal pod eviction with normal ownerRefs",
|
|
||||||
pods: []*v1.Pod{
|
|
||||||
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
evictSystemCriticalPods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
description: "Normal pod eviction with normal ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
|
|
||||||
pods: []*v1.Pod{
|
|
||||||
test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
|
|
||||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
evictSystemCriticalPods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
description: "Normal pod eviction with replicaSet ownerRefs",
|
|
||||||
pods: []*v1.Pod{
|
|
||||||
test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
evictSystemCriticalPods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
description: "Normal pod eviction with replicaSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
|
|
||||||
pods: []*v1.Pod{
|
|
||||||
test.BuildTestPod("p4", 400, 0, n1.Name, func(pod *v1.Pod) {
|
|
||||||
				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
				pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: true,
	}, {
		description: "Normal pod eviction with statefulSet ownerRefs",
		pods: []*v1.Pod{
			test.BuildTestPod("p18", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: true,
	}, {
		description: "Normal pod eviction with statefulSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
		pods: []*v1.Pod{
			test.BuildTestPod("p19", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
				pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList()
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: true,
	}, {
		description: "Pod not evicted because it is bound to a PV and evictLocalStoragePods = false",
		pods: []*v1.Pod{
			test.BuildTestPod("p5", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.Volumes = []v1.Volume{
					{
						Name: "sample",
						VolumeSource: v1.VolumeSource{
							HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
							EmptyDir: &v1.EmptyDirVolumeSource{
								SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
						},
					},
				}
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: false,
	}, {
		description: "Pod is evicted because it is bound to a PV and evictLocalStoragePods = true",
		pods: []*v1.Pod{
			test.BuildTestPod("p6", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.Volumes = []v1.Volume{
					{
						Name: "sample",
						VolumeSource: v1.VolumeSource{
							HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
							EmptyDir: &v1.EmptyDirVolumeSource{
								SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
						},
					},
				}
			}),
		},
		evictLocalStoragePods: true,
		evictSystemCriticalPods: false,
		result: true,
	}, {
		description: "Pod is evicted because it is bound to a PV and evictLocalStoragePods = false, but it has scheduler.alpha.kubernetes.io/evict annotation",
		pods: []*v1.Pod{
			test.BuildTestPod("p7", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.Volumes = []v1.Volume{
					{
						Name: "sample",
						VolumeSource: v1.VolumeSource{
							HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
							EmptyDir: &v1.EmptyDirVolumeSource{
								SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
						},
					},
				}
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: true,
	}, {
		description: "Pod not evicted because it is part of a daemonSet",
		pods: []*v1.Pod{
			test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: false,
	}, {
		description: "Pod is evicted because it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
		pods: []*v1.Pod{
			test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
				pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: true,
	}, {
		description: "Pod not evicted because it is a mirror pod",
		pods: []*v1.Pod{
			test.BuildTestPod("p10", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Annotations = test.GetMirrorPodAnnotation()
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: false,
	}, {
		description: "Pod is evicted because it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
		pods: []*v1.Pod{
			test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Annotations = test.GetMirrorPodAnnotation()
				pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: true,
	}, {
		description: "Pod not evicted because it has system critical priority",
		pods: []*v1.Pod{
			test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				priority := utils.SystemCriticalPriority
				pod.Spec.Priority = &priority
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: false,
	}, {
		description: "Pod is evicted because it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
		pods: []*v1.Pod{
			test.BuildTestPod("p13", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				priority := utils.SystemCriticalPriority
				pod.Spec.Priority = &priority
				pod.Annotations = map[string]string{
					"descheduler.alpha.kubernetes.io/evict": "true",
				}
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		result: true,
	}, {
		description: "Pod not evicted because it has a priority higher than the configured priority threshold",
		pods: []*v1.Pod{
			test.BuildTestPod("p14", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.Priority = &highPriority
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		priorityThreshold: &lowPriority,
		result: false,
	}, {
		description: "Pod is evicted because it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
		pods: []*v1.Pod{
			test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
				pod.Spec.Priority = &highPriority
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		priorityThreshold: &lowPriority,
		result: true,
	}, {
		description: "Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true",
		pods: []*v1.Pod{
			test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				priority := utils.SystemCriticalPriority
				pod.Spec.Priority = &priority
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: true,
		result: true,
	}, {
		description: "Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
		pods: []*v1.Pod{
			test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
				priority := utils.SystemCriticalPriority
				pod.Spec.Priority = &priority
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: true,
		result: true,
	}, {
		description: "Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
		pods: []*v1.Pod{
			test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.Priority = &highPriority
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: true,
		priorityThreshold: &lowPriority,
		result: true,
	}, {
		description: "Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
		pods: []*v1.Pod{
			test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
				pod.Spec.Priority = &highPriority
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: true,
		priorityThreshold: &lowPriority,
		result: true,
	}, {
		description: "Pod with no tolerations running on normal node, all other nodes tainted",
		pods: []*v1.Pod{
			test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
			}),
		},
		nodes: []*v1.Node{
			test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
				node.Spec.Taints = []v1.Taint{
					{
						Key: nodeTaintKey,
						Value: nodeTaintValue,
						Effect: v1.TaintEffectNoSchedule,
					},
				}
			}),
			test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
				node.Spec.Taints = []v1.Taint{
					{
						Key: nodeTaintKey,
						Value: nodeTaintValue,
						Effect: v1.TaintEffectNoSchedule,
					},
				}
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		nodeFit: true,
		result: false,
	}, {
		description: "Pod with correct tolerations running on normal node, all other nodes tainted",
		pods: []*v1.Pod{
			test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.Tolerations = []v1.Toleration{
					{
						Key: nodeTaintKey,
						Value: nodeTaintValue,
						Effect: v1.TaintEffectNoSchedule,
					},
				}
			}),
		},
		nodes: []*v1.Node{
			test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
				node.Spec.Taints = []v1.Taint{
					{
						Key: nodeTaintKey,
						Value: nodeTaintValue,
						Effect: v1.TaintEffectNoSchedule,
					},
				}
			}),
			test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
				node.Spec.Taints = []v1.Taint{
					{
						Key: nodeTaintKey,
						Value: nodeTaintValue,
						Effect: v1.TaintEffectNoSchedule,
					},
				}
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		nodeFit: true,
		result: true,
	}, {
		description: "Pod with incorrect node selector",
		pods: []*v1.Pod{
			test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.NodeSelector = map[string]string{
					nodeLabelKey: "fail",
				}
			}),
		},
		nodes: []*v1.Node{
			test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
				node.ObjectMeta.Labels = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
			test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
				node.ObjectMeta.Labels = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		nodeFit: true,
		result: false,
	}, {
		description: "Pod with correct node selector",
		pods: []*v1.Pod{
			test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.NodeSelector = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
		},
		nodes: []*v1.Node{
			test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
				node.ObjectMeta.Labels = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
			test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
				node.ObjectMeta.Labels = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		nodeFit: true,
		result: true,
	}, {
		description: "Pod with correct node selector, but only available node doesn't have enough CPU",
		pods: []*v1.Pod{
			test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.NodeSelector = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
		},
		nodes: []*v1.Node{
			test.BuildTestNode("node2-TEST", 10, 16, 10, func(node *v1.Node) {
				node.ObjectMeta.Labels = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
			test.BuildTestNode("node3-TEST", 10, 16, 10, func(node *v1.Node) {
				node.ObjectMeta.Labels = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		nodeFit: true,
		result: false,
	}, {
		description: "Pod with correct node selector, and one node has enough memory",
		pods: []*v1.Pod{
			test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.NodeSelector = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
			test.BuildTestPod("node2-pod-10GB-mem", 20, 10, "node2", func(pod *v1.Pod) {
				pod.ObjectMeta.Labels = map[string]string{
					"test": "true",
				}
			}),
			test.BuildTestPod("node3-pod-10GB-mem", 20, 10, "node3", func(pod *v1.Pod) {
				pod.ObjectMeta.Labels = map[string]string{
					"test": "true",
				}
			}),
		},
		nodes: []*v1.Node{
			test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
				node.ObjectMeta.Labels = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
			test.BuildTestNode("node3", 100, 20, 10, func(node *v1.Node) {
				node.ObjectMeta.Labels = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		nodeFit: true,
		result: true,
	}, {
		description: "Pod with correct node selector, but both nodes don't have enough memory",
		pods: []*v1.Pod{
			test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
				pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
				pod.Spec.NodeSelector = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
			test.BuildTestPod("node2-pod-10GB-mem", 10, 10, "node2", func(pod *v1.Pod) {
				pod.ObjectMeta.Labels = map[string]string{
					"test": "true",
				}
			}),
			test.BuildTestPod("node3-pod-10GB-mem", 10, 10, "node3", func(pod *v1.Pod) {
				pod.ObjectMeta.Labels = map[string]string{
					"test": "true",
				}
			}),
		},
		nodes: []*v1.Node{
			test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
				node.ObjectMeta.Labels = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
			test.BuildTestNode("node3", 100, 16, 10, func(node *v1.Node) {
				node.ObjectMeta.Labels = map[string]string{
					nodeLabelKey: nodeLabelValue,
				}
			}),
		},
		evictLocalStoragePods: false,
		evictSystemCriticalPods: false,
		nodeFit: true,
		result: false,
	},
	}

	for _, test := range testCases {
		t.Run(test.description, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			nodes := append(test.nodes, n1)

			var objs []runtime.Object
			for _, node := range test.nodes {
				objs = append(objs, node)
			}
			for _, pod := range test.pods {
				objs = append(objs, pod)
			}

			fakeClient := fake.NewSimpleClientset(objs...)

			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
			podInformer := sharedInformerFactory.Core().V1().Pods()

			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
			if err != nil {
				t.Errorf("Build get pods assigned to node function error: %v", err)
			}

			sharedInformerFactory.Start(ctx.Done())
			sharedInformerFactory.WaitForCacheSync(ctx.Done())

			podEvictor := &PodEvictor{
				client: fakeClient,
				nodes: nodes,
				nodeIndexer: getPodsAssignedToNode,
				policyGroupVersion: policyv1.SchemeGroupVersion.String(),
				dryRun: false,
				maxPodsToEvictPerNode: nil,
				maxPodsToEvictPerNamespace: nil,
				evictLocalStoragePods: test.evictLocalStoragePods,
				evictSystemCriticalPods: test.evictSystemCriticalPods,
				evictFailedBarePods: test.evictFailedBarePods,
			}

			var opts []func(opts *Options)
			if test.priorityThreshold != nil {
				opts = append(opts, WithPriorityThreshold(*test.priorityThreshold))
			}
			if test.nodeFit {
				opts = append(opts, WithNodeFit(true))
			}
			evictable := podEvictor.Evictable(opts...)

			result := evictable.IsEvictable(test.pods[0])
			if result != test.result {
				t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
			}
		})
	}
}
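Editor's note: the cases above all hinge on the `descheduler.alpha.kubernetes.io/evict` annotation overriding the usual protections (local storage, DaemonSet ownership, mirror pods, critical priority). The sketch below is illustrative only and not part of this change set; it shows one way a pod could be marked this way with plain client-go, assuming the usual imports (context, metav1, kubernetes) and a placeholder namespace and pod name.

// markEvictable force-marks a pod so the descheduler's IsEvictable checks above
// treat it as evictable regardless of the usual safety checks. Illustrative sketch.
func markEvictable(ctx context.Context, client kubernetes.Interface, ns, name string) error {
	pod, err := client.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
	_, err = client.CoreV1().Pods(ns).Update(ctx, pod, metav1.UpdateOptions{})
	return err
}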
 func TestPodTypes(t *testing.T) {
 	n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
 	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
@@ -680,7 +81,7 @@ func TestPodTypes(t *testing.T) {
 	p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
 	// The following 4 pods won't get evicted.
 	// A daemonset.
-	//p2.Annotations = test.GetDaemonSetAnnotation()
+	// p2.Annotations = test.GetDaemonSetAnnotation()
 	p2.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
 	// A pod with local storage.
 	p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -690,7 +91,8 @@ func TestPodTypes(t *testing.T) {
 		VolumeSource: v1.VolumeSource{
 			HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 			EmptyDir: &v1.EmptyDirVolumeSource{
-				SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+				SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+			},
 		},
 	},
 	}
@@ -710,5 +112,4 @@ func TestPodTypes(t *testing.T) {
 	if utils.IsDaemonsetPod(ownerRefList) || utils.IsPodWithLocalStorage(p1) || utils.IsCriticalPriorityPod(p1) || utils.IsMirrorPod(p1) || utils.IsStaticPod(p1) {
 		t.Errorf("Expected p1 to be a normal pod.")
 	}
-
 }

@@ -19,13 +19,14 @@ package descheduler
 import (
 	"context"
 	"fmt"
+	"os"
+
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
 	componentbaseconfig "k8s.io/component-base/config"
 	"k8s.io/klog/v2"
-	"os"
 )

 // NewLeaderElection starts the leader election code loop

@@ -24,8 +24,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
+	listersv1 "k8s.io/client-go/listers/core/v1"
 	"k8s.io/klog/v2"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
@@ -33,7 +33,7 @@ import (

 // ReadyNodes returns ready nodes irrespective of whether they are
 // schedulable or not.
-func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string) ([]*v1.Node, error) {
+func ReadyNodes(ctx context.Context, client clientset.Interface, nodeLister listersv1.NodeLister, nodeSelector string) ([]*v1.Node, error) {
 	ns, err := labels.Parse(nodeSelector)
 	if err != nil {
 		return []*v1.Node{}, err
@@ -41,7 +41,7 @@ func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer co

 	var nodes []*v1.Node
 	// err is defined above
-	if nodes, err = nodeInformer.Lister().List(ns); err != nil {
+	if nodes, err = nodeLister.List(ns); err != nil {
 		return []*v1.Node{}, err
 	}

@@ -119,9 +119,11 @@ func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v
 		errors = append(errors, fmt.Errorf("pod does not tolerate taints on the node"))
 	}
 	// Check if the pod can fit on a node based off it's requests
-	ok, reqErrors := fitsRequest(nodeIndexer, pod, node)
-	if !ok {
-		errors = append(errors, reqErrors...)
+	if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
+		ok, reqErrors := fitsRequest(nodeIndexer, pod, node)
+		if !ok {
+			errors = append(errors, reqErrors...)
+		}
 	}
 	// Check if node is schedulable
 	if IsNodeUnschedulable(node) {

@@ -56,7 +56,6 @@ func TestReadyNodes(t *testing.T) {
 	if IsReady(node5) {
 		t.Errorf("Expected %v to be not ready", node5.Name)
 	}
-
 }

 func TestReadyNodesWithNodeSelector(t *testing.T) {
@@ -70,14 +69,14 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
 	nodeSelector := "type=compute"

 	sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
+	nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()

 	stopChannel := make(chan struct{})
 	sharedInformerFactory.Start(stopChannel)
 	sharedInformerFactory.WaitForCacheSync(stopChannel)
 	defer close(stopChannel)

-	nodes, _ := ReadyNodes(ctx, fakeClient, nodeInformer, nodeSelector)
+	nodes, _ := ReadyNodes(ctx, fakeClient, nodeLister, nodeSelector)

 	if nodes[0].Name != "node1" {
 		t.Errorf("Expected node1, got %s", nodes[0].Name)
@@ -111,11 +110,9 @@ func TestIsNodeUnschedulable(t *testing.T) {
 			t.Errorf("Test %#v failed", test.description)
 		}
 	}
-
 }

 func TestPodFitsCurrentNode(t *testing.T) {
-
 	nodeLabelKey := "kubernetes.io/desiredNode"
 	nodeLabelValue := "yes"

@@ -207,7 +204,7 @@ func TestPodFitsCurrentNode(t *testing.T) {
 	fakeClient := fake.NewSimpleClientset(objs...)

 	sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-	podInformer := sharedInformerFactory.Core().V1().Pods()
+	podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

 	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
 	if err != nil {
@@ -737,7 +734,7 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
 	fakeClient := fake.NewSimpleClientset(objs...)

 	sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-	podInformer := sharedInformerFactory.Core().V1().Pods()
+	podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

 	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
 	if err != nil {
@@ -756,7 +753,7 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
 }

 // createResourceList builds a small resource list of core resources
-func createResourceList(cpu int64, memory int64, ephemeralStorage int64) v1.ResourceList {
+func createResourceList(cpu, memory, ephemeralStorage int64) v1.ResourceList {
 	resourceList := make(map[v1.ResourceName]resource.Quantity)
 	resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
 	resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)

@@ -23,7 +23,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
-	coreinformers "k8s.io/client-go/informers/core/v1"
 	"k8s.io/client-go/tools/cache"

 	"sigs.k8s.io/descheduler/pkg/utils"
@@ -118,9 +117,9 @@ func (o *Options) BuildFilterFunc() (FilterFunc, error) {

 // BuildGetPodsAssignedToNodeFunc establishes an indexer to map the pods and their assigned nodes.
 // It returns a function to help us get all the pods that assigned to a node based on the indexer.
-func BuildGetPodsAssignedToNodeFunc(podInformer coreinformers.PodInformer) (GetPodsAssignedToNodeFunc, error) {
+func BuildGetPodsAssignedToNodeFunc(podInformer cache.SharedIndexInformer) (GetPodsAssignedToNodeFunc, error) {
 	// Establish an indexer to map the pods and their assigned nodes.
-	err := podInformer.Informer().AddIndexers(cache.Indexers{
+	err := podInformer.AddIndexers(cache.Indexers{
 		nodeNameKeyIndex: func(obj interface{}) ([]string, error) {
 			pod, ok := obj.(*v1.Pod)
 			if !ok {
@@ -137,7 +136,7 @@ func BuildGetPodsAssignedToNodeFunc(podInformer coreinformers.PodInformer) (GetP
 	}

 	// The indexer helps us get all the pods that assigned to a node.
-	podIndexer := podInformer.Informer().GetIndexer()
+	podIndexer := podInformer.GetIndexer()
 	getPodsAssignedToNode := func(nodeName string, filter FilterFunc) ([]*v1.Pod, error) {
 		objs, err := podIndexer.ByIndex(nodeNameKeyIndex, nodeName)
 		if err != nil {
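Editor's note: after the signature change above, callers hand BuildGetPodsAssignedToNodeFunc the underlying shared index informer rather than the typed PodInformer. A minimal, illustrative call site follows; the variable names, the surrounding function, and the trivial "keep everything" filter are assumptions, not part of the diff.

// Sketch of the new call pattern; the informer caches must be started and synced
// before the returned lookup function is used.
factory := informers.NewSharedInformerFactory(client, 0)
podInformer := factory.Core().V1().Pods().Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
	return err
}
factory.Start(ctx.Done())
factory.WaitForCacheSync(ctx.Done())
podsOnNode1, err := getPodsAssignedToNode("node1", func(*v1.Pod) bool { return true })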
@@ -228,3 +227,10 @@ func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
 		return *pods[i].Spec.Priority < *pods[j].Spec.Priority
 	})
 }
+
+// SortPodsBasedOnAge sorts Pods from oldest to most recent in place
+func SortPodsBasedOnAge(pods []*v1.Pod) {
+	sort.Slice(pods, func(i, j int) bool {
+		return pods[i].CreationTimestamp.Before(&pods[j].CreationTimestamp)
+	})
+}

@@ -18,8 +18,10 @@ package pod

 import (
 	"context"
+	"fmt"
 	"reflect"
 	"testing"
+	"time"

 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -92,7 +94,7 @@ func TestListPodsOnANode(t *testing.T) {
 	fakeClient := fake.NewSimpleClientset(objs...)

 	sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-	podInformer := sharedInformerFactory.Core().V1().Pods()
+	podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

 	getPodsAssignedToNode, err := BuildGetPodsAssignedToNodeFunc(podInformer)
 	if err != nil {
@@ -154,3 +156,23 @@ func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
 		t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
 	}
 }
+
+func TestSortPodsBasedOnAge(t *testing.T) {
+	podList := make([]*v1.Pod, 9)
+	n1 := test.BuildTestNode("n1", 4000, 3000, int64(len(podList)), nil)
+
+	for i := 0; i < len(podList); i++ {
+		podList[i] = test.BuildTestPod(fmt.Sprintf("p%d", i), 1, 32, n1.Name, func(pod *v1.Pod) {
+			creationTimestamp := metav1.Now().Add(time.Minute * time.Duration(-i))
+			pod.ObjectMeta.SetCreationTimestamp(metav1.NewTime(creationTimestamp))
+		})
+	}
+
+	SortPodsBasedOnAge(podList)
+
+	for i := 0; i < len(podList)-1; i++ {
+		if podList[i+1].CreationTimestamp.Before(&podList[i].CreationTimestamp) {
+			t.Errorf("Expected pods to be sorted by age but pod at index %d was older than %d", i+1, i)
+		}
+	}
+}

@@ -17,24 +17,30 @@ limitations under the License.
 package descheduler

 import (
+	"context"
 	"fmt"
-	"io/ioutil"
+	"os"

 	"k8s.io/apimachinery/pkg/runtime"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"

 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
 	"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
+	"sigs.k8s.io/descheduler/pkg/framework"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )

-func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
+func LoadPolicyConfig(policyConfigFile string, client clientset.Interface, registry pluginbuilder.Registry) (*api.DeschedulerPolicy, error) {
 	if policyConfigFile == "" {
 		klog.V(1).InfoS("Policy config file not specified")
 		return nil, nil
 	}

-	policy, err := ioutil.ReadFile(policyConfigFile)
+	policy, err := os.ReadFile(policyConfigFile)
 	if err != nil {
 		return nil, fmt.Errorf("failed to read policy config file %q: %+v", policyConfigFile, err)
 	}
@@ -46,10 +52,164 @@ func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
 		return nil, fmt.Errorf("failed decoding descheduler's policy config %q: %v", policyConfigFile, err)
 	}

-	internalPolicy := &api.DeschedulerPolicy{}
-	if err := scheme.Scheme.Convert(versionedPolicy, internalPolicy, nil); err != nil {
+	// Build profiles
+	internalPolicy, err := V1alpha1ToInternal(client, versionedPolicy, registry)
+	if err != nil {
 		return nil, fmt.Errorf("failed converting versioned policy to internal policy version: %v", err)
 	}

 	return internalPolicy, nil
 }

+func V1alpha1ToInternal(
+	client clientset.Interface,
+	deschedulerPolicy *v1alpha1.DeschedulerPolicy,
+	registry pluginbuilder.Registry,
+) (*api.DeschedulerPolicy, error) {
+	var evictLocalStoragePods bool
+	if deschedulerPolicy.EvictLocalStoragePods != nil {
+		evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
+	}
+
+	evictBarePods := false
+	if deschedulerPolicy.EvictFailedBarePods != nil {
+		evictBarePods = *deschedulerPolicy.EvictFailedBarePods
+		if evictBarePods {
+			klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
+		}
+	}
+
+	evictSystemCriticalPods := false
+	if deschedulerPolicy.EvictSystemCriticalPods != nil {
+		evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
+		if evictSystemCriticalPods {
+			klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
+		}
+	}
+
+	ignorePvcPods := false
+	if deschedulerPolicy.IgnorePVCPods != nil {
+		ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
+	}
+
+	var profiles []api.Profile
+
+	// Build profiles
+	for name, strategy := range deschedulerPolicy.Strategies {
+		if _, ok := pluginbuilder.PluginRegistry[string(name)]; ok {
+			if strategy.Enabled {
+				params := strategy.Params
+				if params == nil {
+					params = &v1alpha1.StrategyParameters{}
+				}
+
+				nodeFit := false
+				if name != "PodLifeTime" {
+					nodeFit = params.NodeFit
+				}
+
+				// TODO(jchaloup): once all strategies are migrated move this check under
+				// the default evictor args validation
+				if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
+					klog.ErrorS(fmt.Errorf("priority threshold misconfigured"), "only one of priorityThreshold fields can be set", "pluginName", name)
+					return nil, fmt.Errorf("priority threshold misconfigured for plugin %v", name)
+				}
+				var priorityThreshold *api.PriorityThreshold
+				if strategy.Params != nil {
+					priorityThreshold = &api.PriorityThreshold{
+						Value: strategy.Params.ThresholdPriority,
+						Name: strategy.Params.ThresholdPriorityClassName,
+					}
+				}
+				thresholdPriority, err := utils.GetPriorityFromStrategyParams(context.TODO(), client, priorityThreshold)
+				if err != nil {
+					klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
+					return nil, fmt.Errorf("failed to get threshold priority from strategy's params: %v", err)
+				}
+
+				var pluginConfig *api.PluginConfig
+				if pcFnc, exists := strategyParamsToPluginArgs[string(name)]; exists {
+					pluginConfig, err = pcFnc(params)
+					if err != nil {
+						klog.ErrorS(err, "skipping strategy", "strategy", name)
+						return nil, fmt.Errorf("failed to get plugin config for strategy %v: %v", name, err)
+					}
+				} else {
+					klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
+					return nil, fmt.Errorf("unknown strategy name: %v", name)
+				}
+
+				profile := api.Profile{
+					Name: fmt.Sprintf("strategy-%v-profile", name),
+					PluginConfigs: []api.PluginConfig{
+						{
+							Name: defaultevictor.PluginName,
+							Args: &defaultevictor.DefaultEvictorArgs{
+								EvictLocalStoragePods: evictLocalStoragePods,
+								EvictSystemCriticalPods: evictSystemCriticalPods,
+								IgnorePvcPods: ignorePvcPods,
+								EvictFailedBarePods: evictBarePods,
+								NodeFit: nodeFit,
+								PriorityThreshold: &api.PriorityThreshold{
+									Value: &thresholdPriority,
+								},
+							},
+						},
+						*pluginConfig,
+					},
+					Plugins: api.Plugins{
+						Evict: api.PluginSet{
+							Enabled: []string{defaultevictor.PluginName},
+						},
+					},
+				}
+
+				pluginArgs := registry[string(name)].PluginArgInstance
+				pluginInstance, err := registry[string(name)].PluginBuilder(pluginArgs, &handleImpl{})
+				if err != nil {
+					klog.ErrorS(fmt.Errorf("could not build plugin"), "plugin build error", "plugin", name)
+					return nil, fmt.Errorf("could not build plugin: %v", name)
+				}
+
+				// pluginInstance can be of any of each type, or both
+				profilePlugins := profile.Plugins
+				profile.Plugins = enableProfilePluginsByType(profilePlugins, pluginInstance, pluginConfig)
+				profiles = append(profiles, profile)
+			}
+		} else {
+			klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
+			return nil, fmt.Errorf("unknown strategy name: %v", name)
+		}
+	}
+
+	return &api.DeschedulerPolicy{
+		Profiles: profiles,
+		NodeSelector: deschedulerPolicy.NodeSelector,
+		MaxNoOfPodsToEvictPerNode: deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
+		MaxNoOfPodsToEvictPerNamespace: deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
+	}, nil
+}
+
+func enableProfilePluginsByType(profilePlugins api.Plugins, pluginInstance framework.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
+	profilePlugins = checkBalance(profilePlugins, pluginInstance, pluginConfig)
+	profilePlugins = checkDeschedule(profilePlugins, pluginInstance, pluginConfig)
+	return profilePlugins
+}
+
+func checkBalance(profilePlugins api.Plugins, pluginInstance framework.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
+	switch p := pluginInstance.(type) {
+	case framework.BalancePlugin:
+		klog.V(3).Infof("converting Balance plugin: %s", p.Name())
+		profilePlugins.Balance.Enabled = []string{pluginConfig.Name}
+	}
+	return profilePlugins
+}
+
+func checkDeschedule(profilePlugins api.Plugins, pluginInstance framework.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
+	switch p := pluginInstance.(type) {
+	case framework.DeschedulePlugin:
+		klog.V(3).Infof("converting Deschedule plugin: %s", p.Name())
+		profilePlugins.Deschedule.Enabled = []string{pluginConfig.Name}
+	}
+	return profilePlugins
+}
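Editor's note: a brief illustration of the new LoadPolicyConfig call pattern introduced above, not part of the change set. The policy file path and client are placeholders, and the registry is assumed to be the package-level pluginbuilder.PluginRegistry consulted by V1alpha1ToInternal (how it gets populated, for example by the SetupPlugins helper used in the tests below, is outside this diff).

// Minimal sketch: load a v1alpha1 policy file and convert it into per-strategy
// profiles with the new three-argument signature.
client := fakeclientset.NewSimpleClientset()
policy, err := descheduler.LoadPolicyConfig("policy.yaml", client, pluginbuilder.PluginRegistry)
if err != nil {
	klog.ErrorS(err, "failed to load policy")
	return
}
for _, profile := range policy.Profiles {
	klog.InfoS("built profile", "name", profile.Name)
}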
pkg/descheduler/policyconfig_test.go (new file, 701 lines)
@@ -0,0 +1,701 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package descheduler
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	fakeclientset "k8s.io/client-go/kubernetes/fake"
+	utilpointer "k8s.io/utils/pointer"
+	"sigs.k8s.io/descheduler/pkg/api"
+	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
+	"sigs.k8s.io/descheduler/pkg/utils"
+)
+
+func TestV1alpha1ToV1alpha2(t *testing.T) {
+	SetupPlugins()
+	defaultEvictorPluginConfig := api.PluginConfig{
+		Name: defaultevictor.PluginName,
+		Args: &defaultevictor.DefaultEvictorArgs{
+			PriorityThreshold: &api.PriorityThreshold{
+				Value: utilpointer.Int32(utils.SystemCriticalPriority),
+			},
+		},
+	}
+	defaultEvictorPluginSet := api.PluginSet{
+		Enabled: []string{defaultevictor.PluginName},
+	}
+	type testCase struct {
+		description string
+		policy *v1alpha1.DeschedulerPolicy
+		err error
+		result *api.DeschedulerPolicy
+	}
+	testCases := []testCase{
+		{
+			description: "RemoveFailedPods enabled, LowNodeUtilization disabled strategies to profile",
+			policy: &v1alpha1.DeschedulerPolicy{
+				Strategies: v1alpha1.StrategyList{
+					removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{
+							Namespaces: &v1alpha1.Namespaces{
+								Exclude: []string{
+									"test2",
+								},
+							},
+						},
+					},
+					nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: false,
+						Params: &v1alpha1.StrategyParameters{
+							NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
+								Thresholds: v1alpha1.ResourceThresholds{
+									"cpu": v1alpha1.Percentage(20),
+									"memory": v1alpha1.Percentage(20),
+									"pods": v1alpha1.Percentage(20),
+								},
+								TargetThresholds: v1alpha1.ResourceThresholds{
+									"cpu": v1alpha1.Percentage(50),
+									"memory": v1alpha1.Percentage(50),
+									"pods": v1alpha1.Percentage(50),
+								},
+							},
+						},
+					},
+				},
+			},
+			result: &api.DeschedulerPolicy{
+				Profiles: []api.Profile{
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
+						PluginConfigs: []api.PluginConfig{
+							defaultEvictorPluginConfig,
+							{
+								Name: removeduplicates.PluginName,
+								Args: &removeduplicates.RemoveDuplicatesArgs{
+									Namespaces: &api.Namespaces{
+										Exclude: []string{
+											"test2",
+										},
+									},
+								},
+							},
+						},
+						Plugins: api.Plugins{
+							Balance: api.PluginSet{
+								Enabled: []string{removeduplicates.PluginName},
+							},
+							Evict: defaultEvictorPluginSet,
+						},
+					},
+					// Disabled strategy is not generating internal plugin since it is not being used internally currently
+					// {
+					// 	Name: nodeutilization.LowNodeUtilizationPluginName,
+					// 	PluginConfigs: []api.PluginConfig{
+					// 		{
+					// 			Name: nodeutilization.LowNodeUtilizationPluginName,
+					// 			Args: &nodeutilization.LowNodeUtilizationArgs{
+					// 				Thresholds: api.ResourceThresholds{
+					// 					"cpu": api.Percentage(20),
+					// [...]
+					// [...]
+					// },
+				},
+			},
+		},
+		{
+			description: "convert all strategies",
+			policy: &v1alpha1.DeschedulerPolicy{
+				Strategies: v1alpha1.StrategyList{
+					removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{},
+					},
+					nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{
+							NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
+								Thresholds: v1alpha1.ResourceThresholds{
+									"cpu": v1alpha1.Percentage(20),
+									"memory": v1alpha1.Percentage(20),
+									"pods": v1alpha1.Percentage(20),
+								},
+								TargetThresholds: v1alpha1.ResourceThresholds{
+									"cpu": v1alpha1.Percentage(50),
+									"memory": v1alpha1.Percentage(50),
+									"pods": v1alpha1.Percentage(50),
+								},
+							},
+						},
+					},
+					nodeutilization.HighNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{
+							NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
+								Thresholds: v1alpha1.ResourceThresholds{
+									"cpu": v1alpha1.Percentage(20),
+									"memory": v1alpha1.Percentage(20),
+									"pods": v1alpha1.Percentage(20),
+								},
+							},
+						},
+					},
+					removefailedpods.PluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{},
+					},
+					removepodshavingtoomanyrestarts.PluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{
+							PodsHavingTooManyRestarts: &v1alpha1.PodsHavingTooManyRestarts{
+								PodRestartThreshold: 100,
+							},
+						},
+					},
+					removepodsviolatinginterpodantiaffinity.PluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{},
+					},
+					removepodsviolatingnodeaffinity.PluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{
+							NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
+						},
+					},
+					removepodsviolatingnodetaints.PluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{},
+					},
+					removepodsviolatingtopologyspreadconstraint.PluginName: v1alpha1.DeschedulerStrategy{
+						Enabled: true,
+						Params: &v1alpha1.StrategyParameters{},
+					},
+				},
+			},
+			result: &api.DeschedulerPolicy{
+				Profiles: []api.Profile{
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.HighNodeUtilizationPluginName),
+						PluginConfigs: []api.PluginConfig{
+							defaultEvictorPluginConfig,
+							{
+								Name: nodeutilization.HighNodeUtilizationPluginName,
+								Args: &nodeutilization.HighNodeUtilizationArgs{
+									Thresholds: api.ResourceThresholds{
+										"cpu": api.Percentage(20),
+										"memory": api.Percentage(20),
+										"pods": api.Percentage(20),
+									},
+								},
+							},
+						},
+						Plugins: api.Plugins{
+							Balance: api.PluginSet{
+								Enabled: []string{nodeutilization.HighNodeUtilizationPluginName},
+							},
+							Evict: defaultEvictorPluginSet,
+						},
+					},
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.LowNodeUtilizationPluginName),
+						PluginConfigs: []api.PluginConfig{
+							defaultEvictorPluginConfig,
+							{
+								Name: nodeutilization.LowNodeUtilizationPluginName,
+								Args: &nodeutilization.LowNodeUtilizationArgs{
+									Thresholds: api.ResourceThresholds{
+										"cpu": api.Percentage(20),
+										"memory": api.Percentage(20),
+										"pods": api.Percentage(20),
+									},
+									TargetThresholds: api.ResourceThresholds{
+										"cpu": api.Percentage(50),
+										"memory": api.Percentage(50),
+										"pods": api.Percentage(50),
+									},
+								},
+							},
+						},
+						Plugins: api.Plugins{
+							Balance: api.PluginSet{
+								Enabled: []string{nodeutilization.LowNodeUtilizationPluginName},
+							},
+							Evict: defaultEvictorPluginSet,
+						},
+					},
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
+						PluginConfigs: []api.PluginConfig{
+							defaultEvictorPluginConfig,
+							{
+								Name: removeduplicates.PluginName,
+								Args: &removeduplicates.RemoveDuplicatesArgs{},
+							},
+						},
+						Plugins: api.Plugins{
+							Balance: api.PluginSet{
+								Enabled: []string{removeduplicates.PluginName},
+							},
+							Evict: defaultEvictorPluginSet,
+						},
+					},
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", removefailedpods.PluginName),
+						PluginConfigs: []api.PluginConfig{
+							defaultEvictorPluginConfig,
+							{
+								Name: removefailedpods.PluginName,
+								Args: &removefailedpods.RemoveFailedPodsArgs{},
+							},
+						},
+						Plugins: api.Plugins{
+							Deschedule: api.PluginSet{
+								Enabled: []string{removefailedpods.PluginName},
+							},
+							Evict: defaultEvictorPluginSet,
+						},
+					},
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", removepodshavingtoomanyrestarts.PluginName),
+						PluginConfigs: []api.PluginConfig{
+							defaultEvictorPluginConfig,
+							{
+								Name: removepodshavingtoomanyrestarts.PluginName,
+								Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
+									PodRestartThreshold: 100,
+								},
+							},
+						},
+						Plugins: api.Plugins{
+							Deschedule: api.PluginSet{
+								Enabled: []string{removepodshavingtoomanyrestarts.PluginName},
+							},
+							Evict: defaultEvictorPluginSet,
+						},
+					},
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatinginterpodantiaffinity.PluginName),
+						PluginConfigs: []api.PluginConfig{
+							defaultEvictorPluginConfig,
+							{
+								Name: removepodsviolatinginterpodantiaffinity.PluginName,
+								Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{},
+							},
+						},
+						Plugins: api.Plugins{
+							Deschedule: api.PluginSet{
+								Enabled: []string{removepodsviolatinginterpodantiaffinity.PluginName},
+							},
+							Evict: defaultEvictorPluginSet,
+						},
+					},
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodeaffinity.PluginName),
+						PluginConfigs: []api.PluginConfig{
+							defaultEvictorPluginConfig,
+							{
+								Name: removepodsviolatingnodeaffinity.PluginName,
+								Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
+									NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
+								},
+							},
+						},
+						Plugins: api.Plugins{
+							Deschedule: api.PluginSet{
+								Enabled: []string{removepodsviolatingnodeaffinity.PluginName},
+							},
+							Evict: defaultEvictorPluginSet,
+						},
+					},
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodetaints.PluginName),
+						PluginConfigs: []api.PluginConfig{
+							defaultEvictorPluginConfig,
+							{
+								Name: removepodsviolatingnodetaints.PluginName,
+								Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{},
+							},
+						},
+						Plugins: api.Plugins{
+							Deschedule: api.PluginSet{
+								Enabled: []string{removepodsviolatingnodetaints.PluginName},
+							},
+							Evict: defaultEvictorPluginSet,
+						},
+					},
+					{
+						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingtopologyspreadconstraint.PluginName),
+						PluginConfigs: []api.PluginConfig{
+							defaultEvictorPluginConfig,
+							{
+								Name: removepodsviolatingtopologyspreadconstraint.PluginName,
+								Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{},
+							},
+						},
+						Plugins: api.Plugins{
+							Balance: api.PluginSet{
+								Enabled: []string{removepodsviolatingtopologyspreadconstraint.PluginName},
+							},
+							Evict: defaultEvictorPluginSet,
+						},
+					},
+				},
+			},
+		},
{
|
||||||
|
description: "pass in all params to check args",
|
||||||
|
policy: &v1alpha1.DeschedulerPolicy{
|
||||||
|
Strategies: v1alpha1.StrategyList{
|
||||||
|
removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{
|
||||||
|
RemoveDuplicates: &v1alpha1.RemoveDuplicates{
|
||||||
|
ExcludeOwnerKinds: []string{"ReplicaSet"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
Thresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(20),
|
||||||
|
"memory": v1alpha1.Percentage(20),
|
||||||
|
"pods": v1alpha1.Percentage(20),
|
||||||
|
},
|
||||||
|
TargetThresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(50),
|
||||||
|
"memory": v1alpha1.Percentage(50),
|
||||||
|
"pods": v1alpha1.Percentage(50),
|
||||||
|
},
|
||||||
|
UseDeviationThresholds: true,
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
nodeutilization.HighNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
Thresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(20),
|
||||||
|
"memory": v1alpha1.Percentage(20),
|
||||||
|
"pods": v1alpha1.Percentage(20),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
removefailedpods.PluginName: v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{
|
||||||
|
FailedPods: &v1alpha1.FailedPods{
|
||||||
|
MinPodLifetimeSeconds: utilpointer.Uint(3600),
|
||||||
|
ExcludeOwnerKinds: []string{"Job"},
|
||||||
|
Reasons: []string{"NodeAffinity"},
|
||||||
|
IncludingInitContainers: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
removepodshavingtoomanyrestarts.PluginName: v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{
|
||||||
|
PodsHavingTooManyRestarts: &v1alpha1.PodsHavingTooManyRestarts{
|
||||||
|
PodRestartThreshold: 100,
|
||||||
|
IncludingInitContainers: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
removepodsviolatinginterpodantiaffinity.PluginName: v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{},
|
||||||
|
},
|
||||||
|
removepodsviolatingnodeaffinity.PluginName: v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
removepodsviolatingnodetaints.PluginName: v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{
|
||||||
|
ExcludedTaints: []string{"dedicated=special-user", "reserved"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
removepodsviolatingtopologyspreadconstraint.PluginName: v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{
|
||||||
|
IncludeSoftConstraints: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
result: &api.DeschedulerPolicy{
|
||||||
|
Profiles: []api.Profile{
|
||||||
|
{
|
||||||
|
Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.HighNodeUtilizationPluginName),
|
||||||
|
PluginConfigs: []api.PluginConfig{
|
||||||
|
defaultEvictorPluginConfig,
|
||||||
|
{
|
||||||
|
Name: nodeutilization.HighNodeUtilizationPluginName,
|
||||||
|
Args: &nodeutilization.HighNodeUtilizationArgs{
|
||||||
|
Thresholds: api.ResourceThresholds{
|
||||||
|
"cpu": api.Percentage(20),
|
||||||
|
"memory": api.Percentage(20),
|
||||||
|
"pods": api.Percentage(20),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Plugins: api.Plugins{
|
||||||
|
Balance: api.PluginSet{
|
||||||
|
Enabled: []string{nodeutilization.HighNodeUtilizationPluginName},
|
||||||
|
},
|
||||||
|
Evict: defaultEvictorPluginSet,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.LowNodeUtilizationPluginName),
|
||||||
|
PluginConfigs: []api.PluginConfig{
|
||||||
|
defaultEvictorPluginConfig,
|
||||||
|
{
|
||||||
|
Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||||
|
Args: &nodeutilization.LowNodeUtilizationArgs{
|
||||||
|
UseDeviationThresholds: true,
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
Thresholds: api.ResourceThresholds{
|
||||||
|
"cpu": api.Percentage(20),
|
||||||
|
"memory": api.Percentage(20),
|
||||||
|
"pods": api.Percentage(20),
|
||||||
|
},
|
||||||
|
TargetThresholds: api.ResourceThresholds{
|
||||||
|
"cpu": api.Percentage(50),
|
||||||
|
"memory": api.Percentage(50),
|
||||||
|
"pods": api.Percentage(50),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Plugins: api.Plugins{
|
||||||
|
Balance: api.PluginSet{
|
||||||
|
Enabled: []string{nodeutilization.LowNodeUtilizationPluginName},
|
||||||
|
},
|
||||||
|
Evict: defaultEvictorPluginSet,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
|
||||||
|
PluginConfigs: []api.PluginConfig{
|
||||||
|
defaultEvictorPluginConfig,
|
||||||
|
{
|
||||||
|
Name: removeduplicates.PluginName,
|
||||||
|
Args: &removeduplicates.RemoveDuplicatesArgs{
|
||||||
|
ExcludeOwnerKinds: []string{"ReplicaSet"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Plugins: api.Plugins{
|
||||||
|
Balance: api.PluginSet{
|
||||||
|
Enabled: []string{removeduplicates.PluginName},
|
||||||
|
},
|
||||||
|
Evict: defaultEvictorPluginSet,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: fmt.Sprintf("strategy-%s-profile", removefailedpods.PluginName),
|
||||||
|
PluginConfigs: []api.PluginConfig{
|
||||||
|
defaultEvictorPluginConfig,
|
||||||
|
{
|
||||||
|
Name: removefailedpods.PluginName,
|
||||||
|
Args: &removefailedpods.RemoveFailedPodsArgs{
|
||||||
|
ExcludeOwnerKinds: []string{"Job"},
|
||||||
|
MinPodLifetimeSeconds: utilpointer.Uint(3600),
|
||||||
|
Reasons: []string{"NodeAffinity"},
|
||||||
|
IncludingInitContainers: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Plugins: api.Plugins{
|
||||||
|
Deschedule: api.PluginSet{
|
||||||
|
Enabled: []string{removefailedpods.PluginName},
|
||||||
|
},
|
||||||
|
Evict: defaultEvictorPluginSet,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: fmt.Sprintf("strategy-%s-profile", removepodshavingtoomanyrestarts.PluginName),
|
||||||
|
PluginConfigs: []api.PluginConfig{
|
||||||
|
defaultEvictorPluginConfig,
|
||||||
|
{
|
||||||
|
Name: removepodshavingtoomanyrestarts.PluginName,
|
||||||
|
Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
|
||||||
|
PodRestartThreshold: 100,
|
||||||
|
IncludingInitContainers: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Plugins: api.Plugins{
|
||||||
|
Deschedule: api.PluginSet{
|
||||||
|
Enabled: []string{removepodshavingtoomanyrestarts.PluginName},
|
||||||
|
},
|
||||||
|
Evict: defaultEvictorPluginSet,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatinginterpodantiaffinity.PluginName),
|
||||||
|
PluginConfigs: []api.PluginConfig{
|
||||||
|
defaultEvictorPluginConfig,
|
||||||
|
{
|
||||||
|
Name: removepodsviolatinginterpodantiaffinity.PluginName,
|
||||||
|
Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Plugins: api.Plugins{
|
||||||
|
Deschedule: api.PluginSet{
|
||||||
|
Enabled: []string{removepodsviolatinginterpodantiaffinity.PluginName},
|
||||||
|
},
|
||||||
|
Evict: defaultEvictorPluginSet,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodeaffinity.PluginName),
|
||||||
|
PluginConfigs: []api.PluginConfig{
|
||||||
|
defaultEvictorPluginConfig,
|
||||||
|
{
|
||||||
|
Name: removepodsviolatingnodeaffinity.PluginName,
|
||||||
|
Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
|
||||||
|
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Plugins: api.Plugins{
|
||||||
|
Deschedule: api.PluginSet{
|
||||||
|
Enabled: []string{removepodsviolatingnodeaffinity.PluginName},
|
||||||
|
},
|
||||||
|
Evict: defaultEvictorPluginSet,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodetaints.PluginName),
|
||||||
|
PluginConfigs: []api.PluginConfig{
|
||||||
|
defaultEvictorPluginConfig,
|
||||||
|
{
|
||||||
|
Name: removepodsviolatingnodetaints.PluginName,
|
||||||
|
Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
|
||||||
|
ExcludedTaints: []string{"dedicated=special-user", "reserved"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Plugins: api.Plugins{
|
||||||
|
Deschedule: api.PluginSet{
|
||||||
|
Enabled: []string{removepodsviolatingnodetaints.PluginName},
|
||||||
|
},
|
||||||
|
Evict: defaultEvictorPluginSet,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingtopologyspreadconstraint.PluginName),
|
||||||
|
PluginConfigs: []api.PluginConfig{
|
||||||
|
defaultEvictorPluginConfig,
|
||||||
|
{
|
||||||
|
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
|
||||||
|
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||||
|
IncludeSoftConstraints: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Plugins: api.Plugins{
|
||||||
|
Balance: api.PluginSet{
|
||||||
|
Enabled: []string{removepodsviolatingtopologyspreadconstraint.PluginName},
|
||||||
|
},
|
||||||
|
Evict: defaultEvictorPluginSet,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid strategy name",
|
||||||
|
policy: &v1alpha1.DeschedulerPolicy{Strategies: v1alpha1.StrategyList{
|
||||||
|
"InvalidName": v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
result: nil,
|
||||||
|
err: fmt.Errorf("unknown strategy name: InvalidName"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid threshold priority",
|
||||||
|
policy: &v1alpha1.DeschedulerPolicy{Strategies: v1alpha1.StrategyList{
|
||||||
|
nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
|
||||||
|
Enabled: true,
|
||||||
|
Params: &v1alpha1.StrategyParameters{
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
ThresholdPriorityClassName: "name",
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
Thresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(20),
|
||||||
|
"memory": v1alpha1.Percentage(20),
|
||||||
|
"pods": v1alpha1.Percentage(20),
|
||||||
|
},
|
||||||
|
TargetThresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(50),
|
||||||
|
"memory": v1alpha1.Percentage(50),
|
||||||
|
"pods": v1alpha1.Percentage(50),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
result: nil,
|
||||||
|
err: fmt.Errorf("priority threshold misconfigured for plugin LowNodeUtilization"),
|
||||||
|
},
|
||||||
|
}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			client := fakeclientset.NewSimpleClientset()
			result, err := V1alpha1ToInternal(client, tc.policy, pluginbuilder.PluginRegistry)
			if err != nil {
				if err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// sort to easily compare deep equality
				result.Profiles = api.SortProfilesByName(result.Profiles)
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

pkg/descheduler/setupplugins.go  (new file, 50 lines)
@@ -0,0 +1,50 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package descheduler

import (
	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)

func SetupPlugins() {
	pluginbuilder.PluginRegistry = pluginbuilder.NewRegistry()
	RegisterDefaultPlugins(pluginbuilder.PluginRegistry)
}

func RegisterDefaultPlugins(registry pluginbuilder.Registry) {
	pluginbuilder.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictorArgs{}, registry)
	pluginbuilder.Register(nodeutilization.LowNodeUtilizationPluginName, nodeutilization.NewLowNodeUtilization, &nodeutilization.LowNodeUtilizationArgs{}, registry)
	pluginbuilder.Register(nodeutilization.HighNodeUtilizationPluginName, nodeutilization.NewHighNodeUtilization, &nodeutilization.HighNodeUtilizationArgs{}, registry)
	pluginbuilder.Register(podlifetime.PluginName, podlifetime.New, &podlifetime.PodLifeTimeArgs{}, registry)
	pluginbuilder.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicatesArgs{}, registry)
	pluginbuilder.Register(removefailedpods.PluginName, removefailedpods.New, &removefailedpods.RemoveFailedPodsArgs{}, registry)
	pluginbuilder.Register(removepodshavingtoomanyrestarts.PluginName, removepodshavingtoomanyrestarts.New, &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{}, registry)
	pluginbuilder.Register(removepodsviolatinginterpodantiaffinity.PluginName, removepodsviolatinginterpodantiaffinity.New, &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{}, registry)
	pluginbuilder.Register(removepodsviolatingnodeaffinity.PluginName, removepodsviolatingnodeaffinity.New, &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{}, registry)
	pluginbuilder.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, registry)
	pluginbuilder.Register(removepodsviolatingtopologyspreadconstraint.PluginName, removepodsviolatingtopologyspreadconstraint.New, &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{}, registry)
}
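Note: a minimal sketch (not part of this change) of how an out-of-tree plugin could be wired through the same pluginbuilder.Register call used above. The "myplugin" package, its import path, its New constructor, and its MyPluginArgs type are hypothetical placeholders; only SetupPlugins, pluginbuilder.Register, and pluginbuilder.PluginRegistry come from this diff.

package main

import (
	"sigs.k8s.io/descheduler/pkg/descheduler"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"

	"example.com/descheduler-plugins/myplugin" // hypothetical out-of-tree plugin module
)

func main() {
	// Populate the registry with the in-tree plugins first.
	descheduler.SetupPlugins()
	// Then add the custom plugin: name, constructor, default args value, target registry.
	pluginbuilder.Register(myplugin.PluginName, myplugin.New, &myplugin.MyPluginArgs{}, pluginbuilder.PluginRegistry)
}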
@@ -1,179 +0,0 @@
|
|||||||
package strategies
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
|
||||||
"k8s.io/klog/v2"
|
|
||||||
|
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
|
|
||||||
)
|
|
||||||
|
|
||||||
// validatedFailedPodsStrategyParams contains validated strategy parameters
|
|
||||||
type validatedFailedPodsStrategyParams struct {
|
|
||||||
validation.ValidatedStrategyParams
|
|
||||||
includingInitContainers bool
|
|
||||||
reasons sets.String
|
|
||||||
excludeOwnerKinds sets.String
|
|
||||||
minPodLifetimeSeconds *uint
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveFailedPods removes Pods that are in failed status phase.
|
|
||||||
func RemoveFailedPods(
|
|
||||||
ctx context.Context,
|
|
||||||
client clientset.Interface,
|
|
||||||
strategy api.DeschedulerStrategy,
|
|
||||||
nodes []*v1.Node,
|
|
||||||
podEvictor *evictions.PodEvictor,
|
|
||||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
|
||||||
) {
|
|
||||||
strategyParams, err := validateAndParseRemoveFailedPodsParams(ctx, client, strategy.Params)
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Invalid RemoveFailedPods parameters")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
evictable := podEvictor.Evictable(
|
|
||||||
evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
|
|
||||||
evictions.WithNodeFit(strategyParams.NodeFit),
|
|
||||||
evictions.WithLabelSelector(strategyParams.LabelSelector),
|
|
||||||
)
|
|
||||||
|
|
||||||
var labelSelector *metav1.LabelSelector
|
|
||||||
if strategy.Params != nil {
|
|
||||||
labelSelector = strategy.Params.LabelSelector
|
|
||||||
}
|
|
||||||
|
|
||||||
podFilter, err := podutil.NewOptions().
|
|
||||||
WithFilter(evictable.IsEvictable).
|
|
||||||
WithNamespaces(strategyParams.IncludedNamespaces).
|
|
||||||
WithoutNamespaces(strategyParams.ExcludedNamespaces).
|
|
||||||
WithLabelSelector(labelSelector).
|
|
||||||
BuildFilterFunc()
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Error initializing pod filter function")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Only list failed pods
|
|
||||||
phaseFilter := func(pod *v1.Pod) bool { return pod.Status.Phase == v1.PodFailed }
|
|
||||||
podFilter = podutil.WrapFilterFuncs(phaseFilter, podFilter)
|
|
||||||
|
|
||||||
for _, node := range nodes {
|
|
||||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
|
||||||
pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Error listing a nodes failed pods", "node", klog.KObj(node))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, pod := range pods {
|
|
||||||
if err = validateFailedPodShouldEvict(pod, *strategyParams); err != nil {
|
|
||||||
klog.V(4).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = podEvictor.EvictPod(ctx, pods[i], node, "FailedPod"); err != nil {
|
|
||||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateAndParseRemoveFailedPodsParams(
|
|
||||||
ctx context.Context,
|
|
||||||
client clientset.Interface,
|
|
||||||
params *api.StrategyParameters,
|
|
||||||
) (*validatedFailedPodsStrategyParams, error) {
|
|
||||||
if params == nil {
|
|
||||||
return &validatedFailedPodsStrategyParams{
|
|
||||||
ValidatedStrategyParams: validation.DefaultValidatedStrategyParams(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, params)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var reasons, excludeOwnerKinds sets.String
|
|
||||||
var includingInitContainers bool
|
|
||||||
var minPodLifetimeSeconds *uint
|
|
||||||
if params.FailedPods != nil {
|
|
||||||
reasons = sets.NewString(params.FailedPods.Reasons...)
|
|
||||||
includingInitContainers = params.FailedPods.IncludingInitContainers
|
|
||||||
excludeOwnerKinds = sets.NewString(params.FailedPods.ExcludeOwnerKinds...)
|
|
||||||
minPodLifetimeSeconds = params.FailedPods.MinPodLifetimeSeconds
|
|
||||||
}
|
|
||||||
|
|
||||||
return &validatedFailedPodsStrategyParams{
|
|
||||||
ValidatedStrategyParams: *strategyParams,
|
|
||||||
includingInitContainers: includingInitContainers,
|
|
||||||
reasons: reasons,
|
|
||||||
excludeOwnerKinds: excludeOwnerKinds,
|
|
||||||
minPodLifetimeSeconds: minPodLifetimeSeconds,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateFailedPodShouldEvict looks at strategy params settings to see if the Pod
|
|
||||||
// should be evicted given the params in the PodFailed policy.
|
|
||||||
func validateFailedPodShouldEvict(pod *v1.Pod, strategyParams validatedFailedPodsStrategyParams) error {
|
|
||||||
var errs []error
|
|
||||||
|
|
||||||
if strategyParams.minPodLifetimeSeconds != nil {
|
|
||||||
podAgeSeconds := uint(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
|
|
||||||
if podAgeSeconds < *strategyParams.minPodLifetimeSeconds {
|
|
||||||
errs = append(errs, fmt.Errorf("pod does not exceed the min age seconds of %d", *strategyParams.minPodLifetimeSeconds))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(strategyParams.excludeOwnerKinds) > 0 {
|
|
||||||
ownerRefList := podutil.OwnerRef(pod)
|
|
||||||
for _, owner := range ownerRefList {
|
|
||||||
if strategyParams.excludeOwnerKinds.Has(owner.Kind) {
|
|
||||||
errs = append(errs, fmt.Errorf("pod's owner kind of %s is excluded", owner.Kind))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(strategyParams.reasons) > 0 {
|
|
||||||
reasons := getFailedContainerStatusReasons(pod.Status.ContainerStatuses)
|
|
||||||
|
|
||||||
if pod.Status.Phase == v1.PodFailed && pod.Status.Reason != "" {
|
|
||||||
reasons = append(reasons, pod.Status.Reason)
|
|
||||||
}
|
|
||||||
|
|
||||||
if strategyParams.includingInitContainers {
|
|
||||||
reasons = append(reasons, getFailedContainerStatusReasons(pod.Status.InitContainerStatuses)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !strategyParams.reasons.HasAny(reasons...) {
|
|
||||||
errs = append(errs, fmt.Errorf("pod does not match any of the reasons"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return utilerrors.NewAggregate(errs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getFailedContainerStatusReasons(containerStatuses []v1.ContainerStatus) []string {
|
|
||||||
reasons := make([]string, 0)
|
|
||||||
|
|
||||||
for _, containerStatus := range containerStatuses {
|
|
||||||
if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason != "" {
|
|
||||||
reasons = append(reasons, containerStatus.State.Waiting.Reason)
|
|
||||||
}
|
|
||||||
if containerStatus.State.Terminated != nil && containerStatus.State.Terminated.Reason != "" {
|
|
||||||
reasons = append(reasons, containerStatus.State.Terminated.Reason)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return reasons
|
|
||||||
}
|
|
||||||
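Note: the RemoveFailedPods strategy removed above no longer consumes v1alpha1.StrategyParameters directly; its settings now travel as plugin arguments. A minimal sketch of that translation follows, mirroring the "RemoveFailedPods" entry in pkg/descheduler/strategy_migration.go further below; it assumes only the types visible in this diff and the usual k8s.io/utils/pointer import path for utilpointer.

package main

import (
	"fmt"

	utilpointer "k8s.io/utils/pointer"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
)

func main() {
	// v1alpha1 strategy parameters as they would appear in an old policy file.
	failedPods := &v1alpha1.FailedPods{
		Reasons:                 []string{"NodeAffinity"},
		ExcludeOwnerKinds:       []string{"Job"},
		MinPodLifetimeSeconds:   utilpointer.Uint(3600),
		IncludingInitContainers: true,
	}
	// Equivalent arguments for the RemoveFailedPods plugin.
	args := &removefailedpods.RemoveFailedPodsArgs{
		Reasons:                 failedPods.Reasons,
		ExcludeOwnerKinds:       failedPods.ExcludeOwnerKinds,
		MinPodLifetimeSeconds:   failedPods.MinPodLifetimeSeconds,
		IncludingInitContainers: failedPods.IncludingInitContainers,
	}
	// Validation matches what the migration code does before building the PluginConfig.
	fmt.Println(removefailedpods.ValidateRemoveFailedPodsArgs(args))
}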
@@ -1,120 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2017 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package strategies
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
|
||||||
"k8s.io/klog/v2"
|
|
||||||
|
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
|
||||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/utils"
|
|
||||||
)
|
|
||||||
|
|
||||||
func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
|
|
||||||
if params == nil || len(params.NodeAffinityType) == 0 {
|
|
||||||
return fmt.Errorf("NodeAffinityType is empty")
|
|
||||||
}
|
|
||||||
// At most one of include/exclude can be set
|
|
||||||
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
|
|
||||||
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
|
||||||
}
|
|
||||||
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
|
|
||||||
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
|
|
||||||
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
|
|
||||||
if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
|
|
||||||
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var includedNamespaces, excludedNamespaces sets.String
|
|
||||||
if strategy.Params.Namespaces != nil {
|
|
||||||
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
|
|
||||||
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
|
|
||||||
}
|
|
||||||
|
|
||||||
nodeFit := false
|
|
||||||
if strategy.Params != nil {
|
|
||||||
nodeFit = strategy.Params.NodeFit
|
|
||||||
}
|
|
||||||
|
|
||||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
|
|
||||||
|
|
||||||
podFilter, err := podutil.NewOptions().
|
|
||||||
WithNamespaces(includedNamespaces).
|
|
||||||
WithoutNamespaces(excludedNamespaces).
|
|
||||||
WithLabelSelector(strategy.Params.LabelSelector).
|
|
||||||
BuildFilterFunc()
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Error initializing pod filter function")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
|
|
||||||
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
|
|
||||||
|
|
||||||
switch nodeAffinity {
|
|
||||||
case "requiredDuringSchedulingIgnoredDuringExecution":
|
|
||||||
for _, node := range nodes {
|
|
||||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
|
||||||
|
|
||||||
pods, err := podutil.ListPodsOnANode(
|
|
||||||
node.Name,
|
|
||||||
getPodsAssignedToNode,
|
|
||||||
podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
|
|
||||||
return evictable.IsEvictable(pod) &&
|
|
||||||
!nodeutil.PodFitsCurrentNode(getPodsAssignedToNode, pod, node) &&
|
|
||||||
nodeutil.PodFitsAnyNode(getPodsAssignedToNode, pod, nodes)
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, pod := range pods {
|
|
||||||
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
|
||||||
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
|
|
||||||
if _, err := podEvictor.EvictPod(ctx, pod, node, "NodeAffinity"); err != nil {
|
|
||||||
klog.ErrorS(err, "Error evicting pod")
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,129 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2017 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package strategies
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/utils"
|
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
|
||||||
"k8s.io/klog/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters) error {
|
|
||||||
if params == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// At most one of include/exclude can be set
|
|
||||||
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
|
|
||||||
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
|
||||||
}
|
|
||||||
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
|
|
||||||
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
|
|
||||||
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
|
|
||||||
if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
|
|
||||||
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var includedNamespaces, excludedNamespaces, excludedTaints sets.String
|
|
||||||
var labelSelector *metav1.LabelSelector
|
|
||||||
if strategy.Params != nil {
|
|
||||||
if strategy.Params.Namespaces != nil {
|
|
||||||
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
|
|
||||||
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
|
|
||||||
}
|
|
||||||
if strategy.Params.ExcludedTaints != nil {
|
|
||||||
excludedTaints = sets.NewString(strategy.Params.ExcludedTaints...)
|
|
||||||
}
|
|
||||||
labelSelector = strategy.Params.LabelSelector
|
|
||||||
}
|
|
||||||
|
|
||||||
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
nodeFit := false
|
|
||||||
if strategy.Params != nil {
|
|
||||||
nodeFit = strategy.Params.NodeFit
|
|
||||||
}
|
|
||||||
|
|
||||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
|
|
||||||
|
|
||||||
podFilter, err := podutil.NewOptions().
|
|
||||||
WithFilter(evictable.IsEvictable).
|
|
||||||
WithNamespaces(includedNamespaces).
|
|
||||||
WithoutNamespaces(excludedNamespaces).
|
|
||||||
WithLabelSelector(labelSelector).
|
|
||||||
BuildFilterFunc()
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Error initializing pod filter function")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
excludeTaint := func(taint *v1.Taint) bool {
|
|
||||||
// Exclude taints by key *or* key=value
|
|
||||||
return excludedTaints.Has(taint.Key) || (taint.Value != "" && excludedTaints.Has(fmt.Sprintf("%s=%s", taint.Key, taint.Value)))
|
|
||||||
}
|
|
||||||
|
|
||||||
taintFilterFnc := func(taint *v1.Taint) bool { return (taint.Effect == v1.TaintEffectNoSchedule) && !excludeTaint(taint) }
|
|
||||||
if strategy.Params != nil && strategy.Params.IncludePreferNoSchedule {
|
|
||||||
taintFilterFnc = func(taint *v1.Taint) bool {
|
|
||||||
return (taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectPreferNoSchedule) && !excludeTaint(taint)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, node := range nodes {
|
|
||||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
|
||||||
pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
|
|
||||||
if err != nil {
|
|
||||||
//no pods evicted as error encountered retrieving evictable Pods
|
|
||||||
return
|
|
||||||
}
|
|
||||||
totalPods := len(pods)
|
|
||||||
for i := 0; i < totalPods; i++ {
|
|
||||||
if !utils.TolerationsTolerateTaintsWithFilter(
|
|
||||||
pods[i].Spec.Tolerations,
|
|
||||||
node.Spec.Taints,
|
|
||||||
taintFilterFnc,
|
|
||||||
) {
|
|
||||||
klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
|
|
||||||
if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
|
|
||||||
klog.ErrorS(err, "Error evicting pod")
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,142 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2020 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package strategies
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
|
||||||
"k8s.io/klog/v2"
|
|
||||||
|
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/utils"
|
|
||||||
)
|
|
||||||
|
|
||||||
func validatePodLifeTimeParams(params *api.StrategyParameters) error {
|
|
||||||
if params == nil || params.PodLifeTime == nil || params.PodLifeTime.MaxPodLifeTimeSeconds == nil {
|
|
||||||
return fmt.Errorf("MaxPodLifeTimeSeconds not set")
|
|
||||||
}
|
|
||||||
|
|
||||||
if params.PodLifeTime.PodStatusPhases != nil {
|
|
||||||
for _, phase := range params.PodLifeTime.PodStatusPhases {
|
|
||||||
if phase != string(v1.PodPending) && phase != string(v1.PodRunning) {
|
|
||||||
return fmt.Errorf("only Pending and Running phases are supported in PodLifeTime")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// At most one of include/exclude can be set
|
|
||||||
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
|
|
||||||
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
|
||||||
}
|
|
||||||
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
|
|
||||||
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
|
|
||||||
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
|
|
||||||
if err := validatePodLifeTimeParams(strategy.Params); err != nil {
|
|
||||||
klog.ErrorS(err, "Invalid PodLifeTime parameters")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var includedNamespaces, excludedNamespaces sets.String
|
|
||||||
if strategy.Params.Namespaces != nil {
|
|
||||||
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
|
|
||||||
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
|
|
||||||
}
|
|
||||||
|
|
||||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
|
|
||||||
|
|
||||||
filter := evictable.IsEvictable
|
|
||||||
if strategy.Params.PodLifeTime.PodStatusPhases != nil {
|
|
||||||
filter = func(pod *v1.Pod) bool {
|
|
||||||
for _, phase := range strategy.Params.PodLifeTime.PodStatusPhases {
|
|
||||||
if string(pod.Status.Phase) == phase {
|
|
||||||
return evictable.IsEvictable(pod)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
podFilter, err := podutil.NewOptions().
|
|
||||||
WithFilter(filter).
|
|
||||||
WithNamespaces(includedNamespaces).
|
|
||||||
WithoutNamespaces(excludedNamespaces).
|
|
||||||
WithLabelSelector(strategy.Params.LabelSelector).
|
|
||||||
BuildFilterFunc()
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Error initializing pod filter function")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, node := range nodes {
|
|
||||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
|
||||||
|
|
||||||
pods := listOldPodsOnNode(node.Name, getPodsAssignedToNode, podFilter, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
|
|
||||||
for _, pod := range pods {
|
|
||||||
success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
|
|
||||||
if success {
|
|
||||||
klog.V(1).InfoS("Evicted pod because it exceeded its lifetime", "pod", klog.KObj(pod), "maxPodLifeTime", *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func listOldPodsOnNode(
|
|
||||||
nodeName string,
|
|
||||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
|
||||||
filter podutil.FilterFunc,
|
|
||||||
maxPodLifeTimeSeconds uint,
|
|
||||||
) []*v1.Pod {
|
|
||||||
pods, err := podutil.ListPodsOnANode(nodeName, getPodsAssignedToNode, filter)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var oldPods []*v1.Pod
|
|
||||||
for _, pod := range pods {
|
|
||||||
podAgeSeconds := uint(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
|
|
||||||
if podAgeSeconds > maxPodLifeTimeSeconds {
|
|
||||||
oldPods = append(oldPods, pod)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return oldPods
|
|
||||||
}
|
|
||||||
@@ -1,127 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2018 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package strategies
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
|
||||||
"k8s.io/klog/v2"
|
|
||||||
|
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
|
||||||
"sigs.k8s.io/descheduler/pkg/utils"
|
|
||||||
)
|
|
||||||
|
|
||||||
func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
|
|
||||||
if params == nil || params.PodsHavingTooManyRestarts == nil || params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
|
|
||||||
return fmt.Errorf("PodsHavingTooManyRestarts threshold not set")
|
|
||||||
}
|
|
||||||
|
|
||||||
// At most one of include/exclude can be set
|
|
||||||
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
|
|
||||||
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
|
||||||
}
|
|
||||||
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
|
|
||||||
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
|
|
||||||
// There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
|
|
||||||
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
|
|
||||||
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
|
|
||||||
if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
|
|
||||||
klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var includedNamespaces, excludedNamespaces sets.String
|
|
||||||
if strategy.Params.Namespaces != nil {
|
|
||||||
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
|
|
||||||
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
|
|
||||||
}
|
|
||||||
|
|
||||||
nodeFit := false
|
|
||||||
if strategy.Params != nil {
|
|
||||||
nodeFit = strategy.Params.NodeFit
|
|
||||||
}
|
|
||||||
|
|
||||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
|
|
||||||
|
|
||||||
podFilter, err := podutil.NewOptions().
|
|
||||||
WithFilter(evictable.IsEvictable).
|
|
||||||
WithNamespaces(includedNamespaces).
|
|
||||||
WithoutNamespaces(excludedNamespaces).
|
|
||||||
WithLabelSelector(strategy.Params.LabelSelector).
|
|
||||||
BuildFilterFunc()
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Error initializing pod filter function")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, node := range nodes {
|
|
||||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
|
||||||
pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, pod := range pods {
|
|
||||||
restarts, initRestarts := calcContainerRestarts(pod)
|
|
||||||
if strategy.Params.PodsHavingTooManyRestarts.IncludingInitContainers {
|
|
||||||
if restarts+initRestarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, err := podEvictor.EvictPod(ctx, pods[i], node, "TooManyRestarts"); err != nil {
|
|
||||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// calcContainerRestarts get container restarts and init container restarts.
|
|
||||||
func calcContainerRestarts(pod *v1.Pod) (int32, int32) {
|
|
||||||
var restarts, initRestarts int32
|
|
||||||
|
|
||||||
for _, cs := range pod.Status.ContainerStatuses {
|
|
||||||
restarts += cs.RestartCount
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, cs := range pod.Status.InitContainerStatuses {
|
|
||||||
initRestarts += cs.RestartCount
|
|
||||||
}
|
|
||||||
|
|
||||||
return restarts, initRestarts
|
|
||||||
}
|
|
||||||
@@ -1,71 +0,0 @@
package validation

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/utils"
)

// ValidatedStrategyParams contains validated common strategy parameters
type ValidatedStrategyParams struct {
	ThresholdPriority  int32
	IncludedNamespaces sets.String
	ExcludedNamespaces sets.String
	LabelSelector      labels.Selector
	NodeFit            bool
}

func DefaultValidatedStrategyParams() ValidatedStrategyParams {
	return ValidatedStrategyParams{ThresholdPriority: utils.SystemCriticalPriority}
}

func ValidateAndParseStrategyParams(
	ctx context.Context,
	client clientset.Interface,
	params *api.StrategyParameters,
) (*ValidatedStrategyParams, error) {
	if params == nil {
		defaultValidatedStrategyParams := DefaultValidatedStrategyParams()
		return &defaultValidatedStrategyParams, nil
	}

	// At most one of include/exclude can be set
	var includedNamespaces, excludedNamespaces sets.String
	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set")
	}
	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
		return nil, fmt.Errorf("only one of ThresholdPriority and thresholdPriorityClassName can be set")
	}

	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
	if err != nil {
		return nil, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
	}
	if params.Namespaces != nil {
		includedNamespaces = sets.NewString(params.Namespaces.Include...)
		excludedNamespaces = sets.NewString(params.Namespaces.Exclude...)
	}
	var selector labels.Selector
	if params.LabelSelector != nil {
		selector, err = metav1.LabelSelectorAsSelector(params.LabelSelector)
		if err != nil {
			return nil, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
		}
	}

	return &ValidatedStrategyParams{
		ThresholdPriority:  thresholdPriority,
		IncludedNamespaces: includedNamespaces,
		ExcludedNamespaces: excludedNamespaces,
		LabelSelector:      selector,
		NodeFit:            params.NodeFit,
	}, nil
}
@@ -1,79 +0,0 @@
package validation

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"sigs.k8s.io/descheduler/pkg/api"
)

var (
	thresholdPriority int32 = 1000
)

func TestValidStrategyParams(t *testing.T) {
	ctx := context.Background()
	fakeClient := &fake.Clientset{}
	testCases := []struct {
		name   string
		params *api.StrategyParameters
	}{
		{name: "validate nil params", params: nil},
		{name: "validate empty params", params: &api.StrategyParameters{}},
		{name: "validate params with NodeFit", params: &api.StrategyParameters{NodeFit: true}},
		{name: "validate params with ThresholdPriority", params: &api.StrategyParameters{ThresholdPriority: &thresholdPriority}},
		{name: "validate params with priorityClassName", params: &api.StrategyParameters{ThresholdPriorityClassName: "high-priority"}},
		{name: "validate params with excluded namespace", params: &api.StrategyParameters{Namespaces: &api.Namespaces{Exclude: []string{"excluded-ns"}}}},
		{name: "validate params with included namespace", params: &api.StrategyParameters{Namespaces: &api.Namespaces{Include: []string{"include-ns"}}}},
		{name: "validate params with empty label selector", params: &api.StrategyParameters{LabelSelector: &metav1.LabelSelector{}}},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			params, err := ValidateAndParseStrategyParams(ctx, fakeClient, tc.params)
			if err != nil {
				t.Errorf("strategy params should be valid but got err: %v", err.Error())
			}
			if params == nil {
				t.Errorf("strategy params should return a strategyParams but got nil")
			}
		})
	}
}

func TestInvalidStrategyParams(t *testing.T) {
	ctx := context.Background()
	fakeClient := &fake.Clientset{}
	testCases := []struct {
		name   string
		params *api.StrategyParameters
	}{
		{
			name:   "invalid params with both included and excluded namespaces nil params",
			params: &api.StrategyParameters{Namespaces: &api.Namespaces{Include: []string{"include-ns"}, Exclude: []string{"exclude-ns"}}},
		},
		{
			name:   "invalid params with both threshold priority and priority class name",
			params: &api.StrategyParameters{ThresholdPriorityClassName: "high-priority", ThresholdPriority: &thresholdPriority},
		},
		{
			name:   "invalid params with bad label selector",
			params: &api.StrategyParameters{LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"": "missing-label"}}},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			params, err := ValidateAndParseStrategyParams(ctx, fakeClient, tc.params)
			if err == nil {
				t.Errorf("strategy params should be invalid but did not get err")
			}
			if params != nil {
				t.Errorf("strategy params should return a nil strategyParams but got %v", params)
			}
		})
	}
}
pkg/descheduler/strategy_migration.go  (new file, 250 lines)
@@ -0,0 +1,250 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package descheduler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Once all strategies are migrated, plugin arguments are read directly from the
// configuration file without any wiring. The wiring is kept here so the descheduler
// can still consume the v1alpha1 configuration while strategies are migrated to plugins.
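//
// Illustration only (not part of this change): the map below is keyed by the v1alpha1
// strategy name, so a caller is assumed to look up the converter and hand it the
// strategy's params; "strategy" here stands for a v1alpha1.DeschedulerStrategy loaded
// from the policy file:
//
//	if convert, ok := strategyParamsToPluginArgs["RemoveFailedPods"]; ok {
//		pluginConfig, err := convert(strategy.Params)
//		if err != nil {
//			// the v1alpha1 params did not pass the plugin's validation
//		}
//		_ = pluginConfig // wire the resulting plugin config into the internal configuration
//	}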
|
||||||
|
|
||||||
|
var strategyParamsToPluginArgs = map[string]func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error){
|
||||||
|
"RemovePodsViolatingNodeTaints": func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error) {
|
||||||
|
args := &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
|
||||||
|
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||||
|
LabelSelector: params.LabelSelector,
|
||||||
|
IncludePreferNoSchedule: params.IncludePreferNoSchedule,
|
||||||
|
ExcludedTaints: params.ExcludedTaints,
|
||||||
|
}
|
||||||
|
if err := removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs(args); err != nil {
|
||||||
|
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodetaints.PluginName)
|
||||||
|
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingnodetaints.PluginName, err)
|
||||||
|
}
|
||||||
|
return &api.PluginConfig{
|
||||||
|
Name: removepodsviolatingnodetaints.PluginName,
|
||||||
|
Args: args,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
"RemoveFailedPods": func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error) {
|
||||||
|
failedPodsParams := params.FailedPods
|
||||||
|
if failedPodsParams == nil {
|
||||||
|
failedPodsParams = &v1alpha1.FailedPods{}
|
||||||
|
}
|
||||||
|
args := &removefailedpods.RemoveFailedPodsArgs{
|
||||||
|
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||||
|
LabelSelector: params.LabelSelector,
|
||||||
|
IncludingInitContainers: failedPodsParams.IncludingInitContainers,
|
||||||
|
MinPodLifetimeSeconds: failedPodsParams.MinPodLifetimeSeconds,
|
||||||
|
ExcludeOwnerKinds: failedPodsParams.ExcludeOwnerKinds,
|
||||||
|
Reasons: failedPodsParams.Reasons,
|
||||||
|
}
|
||||||
|
if err := removefailedpods.ValidateRemoveFailedPodsArgs(args); err != nil {
|
||||||
|
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removefailedpods.PluginName)
|
||||||
|
return nil, fmt.Errorf("strategy %q param validation failed: %v", removefailedpods.PluginName, err)
|
||||||
|
}
|
||||||
|
return &api.PluginConfig{
|
||||||
|
Name: removefailedpods.PluginName,
|
||||||
|
Args: args,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
"RemovePodsViolatingNodeAffinity": func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error) {
|
||||||
|
args := &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
|
||||||
|
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||||
|
LabelSelector: params.LabelSelector,
|
||||||
|
NodeAffinityType: params.NodeAffinityType,
|
||||||
|
}
|
||||||
|
if err := removepodsviolatingnodeaffinity.ValidateRemovePodsViolatingNodeAffinityArgs(args); err != nil {
|
||||||
|
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodeaffinity.PluginName)
|
||||||
|
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingnodeaffinity.PluginName, err)
|
||||||
|
}
|
||||||
|
return &api.PluginConfig{
|
||||||
|
Name: removepodsviolatingnodeaffinity.PluginName,
|
||||||
|
Args: args,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
"RemovePodsViolatingInterPodAntiAffinity": func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error) {
|
||||||
|
args := &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{
|
||||||
|
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||||
|
LabelSelector: params.LabelSelector,
|
||||||
|
}
|
||||||
|
if err := removepodsviolatinginterpodantiaffinity.ValidateRemovePodsViolatingInterPodAntiAffinityArgs(args); err != nil {
|
||||||
|
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatinginterpodantiaffinity.PluginName)
|
||||||
|
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatinginterpodantiaffinity.PluginName, err)
|
||||||
|
}
|
||||||
|
return &api.PluginConfig{
|
||||||
|
Name: removepodsviolatinginterpodantiaffinity.PluginName,
|
||||||
|
Args: args,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
"RemovePodsHavingTooManyRestarts": func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error) {
|
||||||
|
tooManyRestartsParams := params.PodsHavingTooManyRestarts
|
||||||
|
if tooManyRestartsParams == nil {
|
||||||
|
tooManyRestartsParams = &v1alpha1.PodsHavingTooManyRestarts{}
|
||||||
|
}
|
||||||
|
args := &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
|
||||||
|
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||||
|
LabelSelector: params.LabelSelector,
|
||||||
|
PodRestartThreshold: tooManyRestartsParams.PodRestartThreshold,
|
||||||
|
IncludingInitContainers: tooManyRestartsParams.IncludingInitContainers,
|
||||||
|
}
|
||||||
|
if err := removepodshavingtoomanyrestarts.ValidateRemovePodsHavingTooManyRestartsArgs(args); err != nil {
|
||||||
|
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodshavingtoomanyrestarts.PluginName)
|
||||||
|
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodshavingtoomanyrestarts.PluginName, err)
|
||||||
|
}
|
||||||
|
return &api.PluginConfig{
|
||||||
|
Name: removepodshavingtoomanyrestarts.PluginName,
|
||||||
|
Args: args,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
"PodLifeTime": func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error) {
|
||||||
|
podLifeTimeParams := params.PodLifeTime
|
||||||
|
if podLifeTimeParams == nil {
|
||||||
|
podLifeTimeParams = &v1alpha1.PodLifeTime{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var states []string
|
||||||
|
if podLifeTimeParams.PodStatusPhases != nil {
|
||||||
|
states = append(states, podLifeTimeParams.PodStatusPhases...)
|
||||||
|
}
|
||||||
|
if podLifeTimeParams.States != nil {
|
||||||
|
states = append(states, podLifeTimeParams.States...)
|
||||||
|
}
|
||||||
|
|
||||||
|
args := &podlifetime.PodLifeTimeArgs{
|
||||||
|
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||||
|
LabelSelector: params.LabelSelector,
|
||||||
|
MaxPodLifeTimeSeconds: podLifeTimeParams.MaxPodLifeTimeSeconds,
|
||||||
|
States: states,
|
||||||
|
}
|
||||||
|
if err := podlifetime.ValidatePodLifeTimeArgs(args); err != nil {
|
||||||
|
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", podlifetime.PluginName)
|
||||||
|
return nil, fmt.Errorf("strategy %q param validation failed: %v", podlifetime.PluginName, err)
|
||||||
|
}
|
||||||
|
return &api.PluginConfig{
|
||||||
|
Name: podlifetime.PluginName,
|
||||||
|
Args: args,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
"RemoveDuplicates": func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error) {
|
||||||
|
args := &removeduplicates.RemoveDuplicatesArgs{
|
||||||
|
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||||
|
}
|
||||||
|
if params.RemoveDuplicates != nil {
|
||||||
|
args.ExcludeOwnerKinds = params.RemoveDuplicates.ExcludeOwnerKinds
|
||||||
|
}
|
||||||
|
if err := removeduplicates.ValidateRemoveDuplicatesArgs(args); err != nil {
|
||||||
|
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removeduplicates.PluginName)
|
||||||
|
return nil, fmt.Errorf("strategy %q param validation failed: %v", removeduplicates.PluginName, err)
|
||||||
|
}
|
||||||
|
return &api.PluginConfig{
|
||||||
|
Name: removeduplicates.PluginName,
|
||||||
|
Args: args,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
"RemovePodsViolatingTopologySpreadConstraint": func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error) {
|
||||||
|
args := &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||||
|
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||||
|
LabelSelector: params.LabelSelector,
|
||||||
|
IncludeSoftConstraints: params.IncludeSoftConstraints,
|
||||||
|
}
|
||||||
|
if err := removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
|
||||||
|
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
|
||||||
|
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingtopologyspreadconstraint.PluginName, err)
|
||||||
|
}
|
||||||
|
return &api.PluginConfig{
|
||||||
|
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
|
||||||
|
Args: args,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
"HighNodeUtilization": func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error) {
|
||||||
|
if params.NodeResourceUtilizationThresholds == nil {
|
||||||
|
params.NodeResourceUtilizationThresholds = &v1alpha1.NodeResourceUtilizationThresholds{}
|
||||||
|
}
|
||||||
|
args := &nodeutilization.HighNodeUtilizationArgs{
|
||||||
|
EvictableNamespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||||
|
Thresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.Thresholds),
|
||||||
|
NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
|
||||||
|
}
|
||||||
|
if err := nodeutilization.ValidateHighNodeUtilizationArgs(args); err != nil {
|
||||||
|
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
|
||||||
|
return nil, fmt.Errorf("strategy %q param validation failed: %v", nodeutilization.HighNodeUtilizationPluginName, err)
|
||||||
|
}
|
||||||
|
return &api.PluginConfig{
|
||||||
|
Name: nodeutilization.HighNodeUtilizationPluginName,
|
||||||
|
Args: args,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
"LowNodeUtilization": func(params *v1alpha1.StrategyParameters) (*api.PluginConfig, error) {
|
||||||
|
if params.NodeResourceUtilizationThresholds == nil {
|
||||||
|
params.NodeResourceUtilizationThresholds = &v1alpha1.NodeResourceUtilizationThresholds{}
|
||||||
|
}
|
||||||
|
args := &nodeutilization.LowNodeUtilizationArgs{
|
||||||
|
EvictableNamespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||||
|
Thresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.Thresholds),
|
||||||
|
TargetThresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.TargetThresholds),
|
||||||
|
UseDeviationThresholds: params.NodeResourceUtilizationThresholds.UseDeviationThresholds,
|
||||||
|
NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := nodeutilization.ValidateLowNodeUtilizationArgs(args); err != nil {
|
||||||
|
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
|
||||||
|
return nil, fmt.Errorf("strategy %q param validation failed: %v", nodeutilization.LowNodeUtilizationPluginName, err)
|
||||||
|
}
|
||||||
|
return &api.PluginConfig{
|
||||||
|
Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||||
|
Args: args,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func v1alpha1NamespacesToInternal(namespaces *v1alpha1.Namespaces) *api.Namespaces {
|
||||||
|
internal := &api.Namespaces{}
|
||||||
|
if namespaces != nil {
|
||||||
|
if namespaces.Exclude != nil {
|
||||||
|
internal.Exclude = namespaces.Exclude
|
||||||
|
}
|
||||||
|
if namespaces.Include != nil {
|
||||||
|
internal.Include = namespaces.Include
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
internal = nil
|
||||||
|
}
|
||||||
|
return internal
|
||||||
|
}
|
||||||
|
|
||||||
|
func v1alpha1ThresholdToInternal(thresholds v1alpha1.ResourceThresholds) api.ResourceThresholds {
|
||||||
|
internal := make(api.ResourceThresholds, len(thresholds))
|
||||||
|
for k, v := range thresholds {
|
||||||
|
internal[k] = api.Percentage(float64(v))
|
||||||
|
}
|
||||||
|
return internal
|
||||||
|
}
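
// A minimal sketch of what the two conversion helpers above produce; the literal
// values are illustrative and not taken from this change:
//
//	v1alpha1ThresholdToInternal(v1alpha1.ResourceThresholds{"cpu": 20, "memory": 30})
//	// -> api.ResourceThresholds{"cpu": 20, "memory": 30}
//
//	v1alpha1NamespacesToInternal(nil)
//	// -> nil, so an absent Namespaces block in the old policy stays unset in the plugin args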
|
||||||
844
pkg/descheduler/strategy_migration_test.go
Normal file
@@ -0,0 +1,844 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package descheduler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
utilpointer "k8s.io/utils/pointer"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeTaints(t *testing.T) {
|
||||||
|
strategyName := "RemovePodsViolatingNodeTaints"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
params *v1alpha1.StrategyParameters
|
||||||
|
err error
|
||||||
|
result *api.PluginConfig
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "wire in all valid parameters",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
ExcludedTaints: []string{
|
||||||
|
"dedicated=special-user",
|
||||||
|
"reserved",
|
||||||
|
},
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
result: &api.PluginConfig{
|
||||||
|
Name: removepodsviolatingnodetaints.PluginName,
|
||||||
|
Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
|
||||||
|
Namespaces: &api.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
ExcludedTaints: []string{"dedicated=special-user", "reserved"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params namespaces",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
Include: []string{"test2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
var result *api.PluginConfig
|
||||||
|
var err error
|
||||||
|
if pcFnc, exists := strategyParamsToPluginArgs[strategyName]; exists {
|
||||||
|
result, err = pcFnc(tc.params)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != tc.err.Error() {
|
||||||
|
t.Errorf("unexpected error: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// compare expected and actual plugin config with cmp.Diff (deep equality)
|
||||||
|
diff := cmp.Diff(tc.result, result)
|
||||||
|
if diff != "" {
|
||||||
|
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStrategyParamsToPluginArgsRemoveFailedPods(t *testing.T) {
|
||||||
|
strategyName := "RemoveFailedPods"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
params *v1alpha1.StrategyParameters
|
||||||
|
err error
|
||||||
|
result *api.PluginConfig
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "wire in all valid parameters",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
FailedPods: &v1alpha1.FailedPods{
|
||||||
|
MinPodLifetimeSeconds: utilpointer.Uint(3600),
|
||||||
|
ExcludeOwnerKinds: []string{"Job"},
|
||||||
|
Reasons: []string{"NodeAffinity"},
|
||||||
|
IncludingInitContainers: true,
|
||||||
|
},
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
result: &api.PluginConfig{
|
||||||
|
Name: removefailedpods.PluginName,
|
||||||
|
Args: &removefailedpods.RemoveFailedPodsArgs{
|
||||||
|
ExcludeOwnerKinds: []string{"Job"},
|
||||||
|
MinPodLifetimeSeconds: utilpointer.Uint(3600),
|
||||||
|
Reasons: []string{"NodeAffinity"},
|
||||||
|
IncludingInitContainers: true,
|
||||||
|
Namespaces: &api.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params namespaces",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
Include: []string{"test2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
var result *api.PluginConfig
|
||||||
|
var err error
|
||||||
|
if pcFnc, exists := strategyParamsToPluginArgs[strategyName]; exists {
|
||||||
|
result, err = pcFnc(tc.params)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != tc.err.Error() {
|
||||||
|
t.Errorf("unexpected error: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// compare expected and actual plugin config with cmp.Diff (deep equality)
|
||||||
|
diff := cmp.Diff(tc.result, result)
|
||||||
|
if diff != "" {
|
||||||
|
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||||
|
strategyName := "RemovePodsViolatingNodeAffinity"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
params *v1alpha1.StrategyParameters
|
||||||
|
err error
|
||||||
|
result *api.PluginConfig
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "wire in all valid parameters",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
result: &api.PluginConfig{
|
||||||
|
Name: removepodsviolatingnodeaffinity.PluginName,
|
||||||
|
Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
|
||||||
|
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||||
|
Namespaces: &api.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params, not setting nodeaffinity type",
|
||||||
|
params: &v1alpha1.StrategyParameters{},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: nodeAffinityType needs to be set", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params namespaces",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
Include: []string{"test2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
var result *api.PluginConfig
|
||||||
|
var err error
|
||||||
|
if pcFnc, exists := strategyParamsToPluginArgs[strategyName]; exists {
|
||||||
|
result, err = pcFnc(tc.params)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != tc.err.Error() {
|
||||||
|
t.Errorf("unexpected error: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// compare expected and actual plugin config with cmp.Diff (deep equality)
|
||||||
|
diff := cmp.Diff(tc.result, result)
|
||||||
|
if diff != "" {
|
||||||
|
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStrategyParamsToPluginArgsRemovePodsViolatingInterPodAntiAffinity(t *testing.T) {
|
||||||
|
strategyName := "RemovePodsViolatingInterPodAntiAffinity"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
params *v1alpha1.StrategyParameters
|
||||||
|
err error
|
||||||
|
result *api.PluginConfig
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "wire in all valid parameters",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
result: &api.PluginConfig{
|
||||||
|
Name: removepodsviolatinginterpodantiaffinity.PluginName,
|
||||||
|
Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{
|
||||||
|
Namespaces: &api.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params namespaces",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
Include: []string{"test2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
var result *api.PluginConfig
|
||||||
|
var err error
|
||||||
|
if pcFnc, exists := strategyParamsToPluginArgs[strategyName]; exists {
|
||||||
|
result, err = pcFnc(tc.params)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != tc.err.Error() {
|
||||||
|
t.Errorf("unexpected error: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// compare expected and actual plugin config with cmp.Diff (deep equality)
|
||||||
|
diff := cmp.Diff(tc.result, result)
|
||||||
|
if diff != "" {
|
||||||
|
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStrategyParamsToPluginArgsRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||||
|
strategyName := "RemovePodsHavingTooManyRestarts"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
params *v1alpha1.StrategyParameters
|
||||||
|
err error
|
||||||
|
result *api.PluginConfig
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "wire in all valid parameters",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
PodsHavingTooManyRestarts: &v1alpha1.PodsHavingTooManyRestarts{
|
||||||
|
PodRestartThreshold: 100,
|
||||||
|
IncludingInitContainers: true,
|
||||||
|
},
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
result: &api.PluginConfig{
|
||||||
|
Name: removepodshavingtoomanyrestarts.PluginName,
|
||||||
|
Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
|
||||||
|
PodRestartThreshold: 100,
|
||||||
|
IncludingInitContainers: true,
|
||||||
|
Namespaces: &api.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params namespaces",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
Include: []string{"test2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params restart threshold",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
PodsHavingTooManyRestarts: &v1alpha1.PodsHavingTooManyRestarts{
|
||||||
|
PodRestartThreshold: 0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: invalid PodsHavingTooManyRestarts threshold", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
var result *api.PluginConfig
|
||||||
|
var err error
|
||||||
|
if pcFnc, exists := strategyParamsToPluginArgs[strategyName]; exists {
|
||||||
|
result, err = pcFnc(tc.params)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != tc.err.Error() {
|
||||||
|
t.Errorf("unexpected error: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// compare expected and actual plugin config with cmp.Diff (deep equality)
|
||||||
|
diff := cmp.Diff(tc.result, result)
|
||||||
|
if diff != "" {
|
||||||
|
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStrategyParamsToPluginArgsPodLifeTime(t *testing.T) {
|
||||||
|
strategyName := "PodLifeTime"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
params *v1alpha1.StrategyParameters
|
||||||
|
err error
|
||||||
|
result *api.PluginConfig
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "wire in all valid parameters",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
PodLifeTime: &v1alpha1.PodLifeTime{
|
||||||
|
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
|
||||||
|
States: []string{
|
||||||
|
"Pending",
|
||||||
|
"PodInitializing",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
result: &api.PluginConfig{
|
||||||
|
Name: podlifetime.PluginName,
|
||||||
|
Args: &podlifetime.PodLifeTimeArgs{
|
||||||
|
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
|
||||||
|
States: []string{
|
||||||
|
"Pending",
|
||||||
|
"PodInitializing",
|
||||||
|
},
|
||||||
|
Namespaces: &api.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params namespaces",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
PodLifeTime: &v1alpha1.PodLifeTime{
|
||||||
|
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
|
||||||
|
},
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
Include: []string{"test2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params MaxPodLifeTimeSeconds not set",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
PodLifeTime: &v1alpha1.PodLifeTime{},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: MaxPodLifeTimeSeconds not set", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
var result *api.PluginConfig
|
||||||
|
var err error
|
||||||
|
if pcFnc, exists := strategyParamsToPluginArgs[strategyName]; exists {
|
||||||
|
result, err = pcFnc(tc.params)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != tc.err.Error() {
|
||||||
|
t.Errorf("unexpected error: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// compare expected and actual plugin config with cmp.Diff (deep equality)
|
||||||
|
diff := cmp.Diff(tc.result, result)
|
||||||
|
if diff != "" {
|
||||||
|
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStrategyParamsToPluginArgsRemoveDuplicates(t *testing.T) {
|
||||||
|
strategyName := "RemoveDuplicates"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
params *v1alpha1.StrategyParameters
|
||||||
|
err error
|
||||||
|
result *api.PluginConfig
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "wire in all valid parameters",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
RemoveDuplicates: &v1alpha1.RemoveDuplicates{
|
||||||
|
ExcludeOwnerKinds: []string{"ReplicaSet"},
|
||||||
|
},
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
result: &api.PluginConfig{
|
||||||
|
Name: removeduplicates.PluginName,
|
||||||
|
Args: &removeduplicates.RemoveDuplicatesArgs{
|
||||||
|
ExcludeOwnerKinds: []string{"ReplicaSet"},
|
||||||
|
Namespaces: &api.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params namespaces",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
PodLifeTime: &v1alpha1.PodLifeTime{
|
||||||
|
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
|
||||||
|
},
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
Include: []string{"test2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
var result *api.PluginConfig
|
||||||
|
var err error
|
||||||
|
if pcFnc, exists := strategyParamsToPluginArgs[strategyName]; exists {
|
||||||
|
result, err = pcFnc(tc.params)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != tc.err.Error() {
|
||||||
|
t.Errorf("unexpected error: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// compare expected and actual plugin config with cmp.Diff (deep equality)
|
||||||
|
diff := cmp.Diff(tc.result, result)
|
||||||
|
if diff != "" {
|
||||||
|
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t *testing.T) {
|
||||||
|
strategyName := "RemovePodsViolatingTopologySpreadConstraint"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
params *v1alpha1.StrategyParameters
|
||||||
|
err error
|
||||||
|
result *api.PluginConfig
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "wire in all valid parameters",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
IncludeSoftConstraints: true,
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
result: &api.PluginConfig{
|
||||||
|
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
|
||||||
|
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||||
|
IncludeSoftConstraints: true,
|
||||||
|
Namespaces: &api.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params namespaces",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
Include: []string{"test2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
var result *api.PluginConfig
|
||||||
|
var err error
|
||||||
|
if pcFnc, exists := strategyParamsToPluginArgs[strategyName]; exists {
|
||||||
|
result, err = pcFnc(tc.params)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != tc.err.Error() {
|
||||||
|
t.Errorf("unexpected error: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// compare expected and actual plugin config with cmp.Diff (deep equality)
|
||||||
|
diff := cmp.Diff(tc.result, result)
|
||||||
|
if diff != "" {
|
||||||
|
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStrategyParamsToPluginArgsHighNodeUtilization(t *testing.T) {
|
||||||
|
strategyName := "HighNodeUtilization"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
params *v1alpha1.StrategyParameters
|
||||||
|
err error
|
||||||
|
result *api.PluginConfig
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "wire in all valid parameters",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
Thresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(20),
|
||||||
|
"memory": v1alpha1.Percentage(20),
|
||||||
|
"pods": v1alpha1.Percentage(20),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
result: &api.PluginConfig{
|
||||||
|
Name: nodeutilization.HighNodeUtilizationPluginName,
|
||||||
|
Args: &nodeutilization.HighNodeUtilizationArgs{
|
||||||
|
Thresholds: api.ResourceThresholds{
|
||||||
|
"cpu": api.Percentage(20),
|
||||||
|
"memory": api.Percentage(20),
|
||||||
|
"pods": api.Percentage(20),
|
||||||
|
},
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
EvictableNamespaces: &api.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params namespaces",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
Thresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(20),
|
||||||
|
"memory": v1alpha1.Percentage(20),
|
||||||
|
"pods": v1alpha1.Percentage(20),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Include: []string{"test2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: only Exclude namespaces can be set, inclusion is not supported", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params nil ResourceThresholds",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: no resource threshold is configured", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params out of bounds threshold",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
Thresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(150),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: cpu threshold not in [0, 100] range", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
var result *api.PluginConfig
|
||||||
|
var err error
|
||||||
|
if pcFnc, exists := strategyParamsToPluginArgs[strategyName]; exists {
|
||||||
|
result, err = pcFnc(tc.params)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != tc.err.Error() {
|
||||||
|
t.Errorf("unexpected error: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// compare expected and actual plugin config with cmp.Diff (deep equality)
|
||||||
|
diff := cmp.Diff(tc.result, result)
|
||||||
|
if diff != "" {
|
||||||
|
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStrategyParamsToPluginArgsLowNodeUtilization(t *testing.T) {
|
||||||
|
strategyName := "LowNodeUtilization"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
params *v1alpha1.StrategyParameters
|
||||||
|
err error
|
||||||
|
result *api.PluginConfig
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "wire in all valid parameters",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
Thresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(20),
|
||||||
|
"memory": v1alpha1.Percentage(20),
|
||||||
|
"pods": v1alpha1.Percentage(20),
|
||||||
|
},
|
||||||
|
TargetThresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(50),
|
||||||
|
"memory": v1alpha1.Percentage(50),
|
||||||
|
"pods": v1alpha1.Percentage(50),
|
||||||
|
},
|
||||||
|
UseDeviationThresholds: true,
|
||||||
|
},
|
||||||
|
ThresholdPriority: utilpointer.Int32(100),
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
result: &api.PluginConfig{
|
||||||
|
Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||||
|
Args: &nodeutilization.LowNodeUtilizationArgs{
|
||||||
|
Thresholds: api.ResourceThresholds{
|
||||||
|
"cpu": api.Percentage(20),
|
||||||
|
"memory": api.Percentage(20),
|
||||||
|
"pods": api.Percentage(20),
|
||||||
|
},
|
||||||
|
TargetThresholds: api.ResourceThresholds{
|
||||||
|
"cpu": api.Percentage(50),
|
||||||
|
"memory": api.Percentage(50),
|
||||||
|
"pods": api.Percentage(50),
|
||||||
|
},
|
||||||
|
UseDeviationThresholds: true,
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
EvictableNamespaces: &api.Namespaces{
|
||||||
|
Exclude: []string{"test1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params namespaces",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
Thresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(20),
|
||||||
|
"memory": v1alpha1.Percentage(20),
|
||||||
|
"pods": v1alpha1.Percentage(20),
|
||||||
|
},
|
||||||
|
TargetThresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(50),
|
||||||
|
"memory": v1alpha1.Percentage(50),
|
||||||
|
"pods": v1alpha1.Percentage(50),
|
||||||
|
},
|
||||||
|
UseDeviationThresholds: true,
|
||||||
|
},
|
||||||
|
Namespaces: &v1alpha1.Namespaces{
|
||||||
|
Include: []string{"test2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: only Exclude namespaces can be set, inclusion is not supported", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params nil ResourceThresholds",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: thresholds config is not valid: no resource threshold is configured", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
description: "invalid params out of bounds threshold",
|
||||||
|
params: &v1alpha1.StrategyParameters{
|
||||||
|
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||||
|
NumberOfNodes: 3,
|
||||||
|
Thresholds: v1alpha1.ResourceThresholds{
|
||||||
|
"cpu": v1alpha1.Percentage(150),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
err: fmt.Errorf("strategy \"%s\" param validation failed: thresholds config is not valid: cpu threshold not in [0, 100] range", strategyName),
|
||||||
|
result: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
var result *api.PluginConfig
|
||||||
|
var err error
|
||||||
|
if pcFnc, exists := strategyParamsToPluginArgs[strategyName]; exists {
|
||||||
|
result, err = pcFnc(tc.params)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err.Error() != tc.err.Error() {
|
||||||
|
t.Errorf("unexpected error: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// compare expected and actual plugin config with cmp.Diff (deep equality)
|
||||||
|
diff := cmp.Diff(tc.result, result)
|
||||||
|
if diff != "" {
|
||||||
|
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
55
pkg/framework/fake/fake.go
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
package fake
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/client-go/informers"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
|
||||||
|
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||||
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework"
|
||||||
|
)
|
||||||
|
|
||||||
|
type HandleImpl struct {
|
||||||
|
ClientsetImpl clientset.Interface
|
||||||
|
GetPodsAssignedToNodeFuncImpl podutil.GetPodsAssignedToNodeFunc
|
||||||
|
SharedInformerFactoryImpl informers.SharedInformerFactory
|
||||||
|
EvictorFilterImpl framework.EvictorPlugin
|
||||||
|
PodEvictorImpl *evictions.PodEvictor
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ framework.Handle = &HandleImpl{}
|
||||||
|
|
||||||
|
func (hi *HandleImpl) ClientSet() clientset.Interface {
|
||||||
|
return hi.ClientsetImpl
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hi *HandleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
|
||||||
|
return hi.GetPodsAssignedToNodeFuncImpl
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hi *HandleImpl) SharedInformerFactory() informers.SharedInformerFactory {
|
||||||
|
return hi.SharedInformerFactoryImpl
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hi *HandleImpl) Evictor() framework.Evictor {
|
||||||
|
return hi
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hi *HandleImpl) Filter(pod *v1.Pod) bool {
|
||||||
|
return hi.EvictorFilterImpl.Filter(pod)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hi *HandleImpl) PreEvictionFilter(pod *v1.Pod) bool {
|
||||||
|
return hi.EvictorFilterImpl.PreEvictionFilter(pod)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
|
||||||
|
return hi.PodEvictorImpl.EvictPod(ctx, pod, opts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hi *HandleImpl) NodeLimitExceeded(node *v1.Node) bool {
|
||||||
|
return hi.PodEvictorImpl.NodeLimitExceeded(node)
|
||||||
|
}
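
// A hedged sketch of how HandleImpl can be assembled in a plugin unit test; the fake
// clientset, informer factory, evictor filter, and pod evictor values are assumptions
// supplied by the surrounding test, not part of this file:
//
//	handle := &HandleImpl{
//		ClientsetImpl:                 fakeClient,
//		GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
//		SharedInformerFactoryImpl:     sharedInformerFactory,
//		EvictorFilterImpl:             evictorFilter,
//		PodEvictorImpl:                podEvictor,
//	}
//	// handle now satisfies framework.Handle and can be passed to a plugin's New function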
|
||||||
207
pkg/framework/plugins/defaultevictor/defaultevictor.go
Normal file
@@ -0,0 +1,207 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package defaultevictor
|
||||||
|
|
||||||
|
import (
|
||||||
|
// "context"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/util/errors"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||||
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
PluginName = "DefaultEvictor"
|
||||||
|
evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ framework.EvictorPlugin = &DefaultEvictor{}
|
||||||
|
|
||||||
|
type constraint func(pod *v1.Pod) error
|
||||||
|
|
||||||
|
// DefaultEvictor is the first EvictorPlugin; it defines the default extension points of the
// pre-baked evictor that ships with the descheduler.
// Despite its name, DefaultEvictor does not evict anything itself. It only customizes
// the other extension points of the evictor, such as filtering, sorting, and other
// actions that might become relevant in the future.
|
||||||
|
type DefaultEvictor struct {
|
||||||
|
args runtime.Object
|
||||||
|
constraints []constraint
|
||||||
|
handle framework.Handle
|
||||||
|
}
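
// Illustration only (the surrounding wiring is an assumption, not part of this file):
// the plugin is built through New with its typed arguments and a framework handle, e.g.
//
//	plugin, err := New(&DefaultEvictorArgs{
//		EvictLocalStoragePods:   false,
//		EvictSystemCriticalPods: false,
//		NodeFit:                 true,
//	}, handle)
//	evictor, _ := plugin.(framework.EvictorPlugin)
//	allowed := evictor.Filter(pod) // false for DaemonSet, mirror, static, or terminating pods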
|
||||||
|
|
||||||
|
// IsPodEvictableBasedOnPriority checks if the given pod is evictable based on priority resolved from pod Spec.
|
||||||
|
func IsPodEvictableBasedOnPriority(pod *v1.Pod, priority int32) bool {
|
||||||
|
return pod.Spec.Priority == nil || *pod.Spec.Priority < priority
|
||||||
|
}
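
// For example, with priority = 100: a pod whose spec.priority is unset or 99 is
// considered evictable here, while a pod with spec.priority of 100 or more is not.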
|
||||||
|
|
||||||
|
// HaveEvictAnnotation checks whether the pod has the evict annotation
|
||||||
|
func HaveEvictAnnotation(pod *v1.Pod) bool {
|
||||||
|
_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
|
||||||
|
return found
|
||||||
|
}
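
// Illustrative example: a pod carrying the annotation
//
//	descheduler.alpha.kubernetes.io/evict: "true"
//
// (any value works, only the key's presence is checked) is accepted by Filter below
// before any other constraint is evaluated.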
|
||||||
|
|
||||||
|
// New builds plugin from its arguments while passing a handle
|
||||||
|
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
|
||||||
|
defaultEvictorArgs, ok := args.(*DefaultEvictorArgs)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
|
||||||
|
}
|
||||||
|
|
||||||
|
ev := &DefaultEvictor{}
|
||||||
|
ev.handle = handle
|
||||||
|
ev.args = defaultEvictorArgs
|
||||||
|
|
||||||
|
if defaultEvictorArgs.EvictFailedBarePods {
|
||||||
|
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
|
||||||
|
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||||
|
ownerRefList := podutil.OwnerRef(pod)
|
||||||
|
// Enable evictFailedBarePods to evict bare pods in failed phase
|
||||||
|
if len(ownerRefList) == 0 && pod.Status.Phase != v1.PodFailed {
|
||||||
|
return fmt.Errorf("pod does not have any ownerRefs and is not in failed phase")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||||
|
ownerRefList := podutil.OwnerRef(pod)
|
||||||
|
if len(ownerRefList) == 0 {
|
||||||
|
return fmt.Errorf("pod does not have any ownerRefs")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if !defaultEvictorArgs.EvictSystemCriticalPods {
|
||||||
|
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||||
|
if utils.IsCriticalPriorityPod(pod) {
|
||||||
|
return fmt.Errorf("pod has system critical priority")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if defaultEvictorArgs.PriorityThreshold != nil && (defaultEvictorArgs.PriorityThreshold.Value != nil || len(defaultEvictorArgs.PriorityThreshold.Name) > 0) {
|
||||||
|
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), handle.ClientSet(), defaultEvictorArgs.PriorityThreshold)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get priority threshold: %v", err)
|
||||||
|
}
|
||||||
|
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||||
|
if IsPodEvictableBasedOnPriority(pod, thresholdPriority) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("pod has higher priority than specified priority class threshold")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
|
||||||
|
}
|
||||||
|
if !defaultEvictorArgs.EvictLocalStoragePods {
|
||||||
|
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||||
|
if utils.IsPodWithLocalStorage(pod) {
|
||||||
|
return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if defaultEvictorArgs.IgnorePvcPods {
|
||||||
|
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||||
|
if utils.IsPodWithPVC(pod) {
|
||||||
|
return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(defaultEvictorArgs.LabelSelector)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not get selector from label selector")
|
||||||
|
}
|
||||||
|
if defaultEvictorArgs.LabelSelector != nil && !selector.Empty() {
|
||||||
|
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||||
|
if !selector.Matches(labels.Set(pod.Labels)) {
|
||||||
|
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return ev, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name retrieves the plugin name
|
||||||
|
func (d *DefaultEvictor) Name() string {
|
||||||
|
return PluginName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
|
||||||
|
defaultEvictorArgs := d.args.(*DefaultEvictorArgs)
|
||||||
|
if defaultEvictorArgs.NodeFit {
|
||||||
|
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), defaultEvictorArgs.NodeSelector)
|
||||||
|
if err != nil {
|
||||||
|
klog.ErrorS(fmt.Errorf("Pod fails the following checks"), "pod", klog.KObj(pod))
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
|
||||||
|
klog.ErrorS(fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable"), "pod", klog.KObj(pod))
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
|
||||||
|
checkErrs := []error{}
|
||||||
|
|
||||||
|
if HaveEvictAnnotation(pod) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
ownerRefList := podutil.OwnerRef(pod)
|
||||||
|
if utils.IsDaemonsetPod(ownerRefList) {
|
||||||
|
checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if utils.IsMirrorPod(pod) {
|
||||||
|
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if utils.IsStaticPod(pod) {
|
||||||
|
checkErrs = append(checkErrs, fmt.Errorf("pod is a static pod"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if utils.IsPodTerminating(pod) {
|
||||||
|
checkErrs = append(checkErrs, fmt.Errorf("pod is terminating"))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range d.constraints {
|
||||||
|
if err := c(pod); err != nil {
|
||||||
|
checkErrs = append(checkErrs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(checkErrs) > 0 {
|
||||||
|
klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", errors.NewAggregate(checkErrs).Error())
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
764
pkg/framework/plugins/defaultevictor/defaultevictor_test.go
Normal file
@@ -0,0 +1,764 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package defaultevictor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/client-go/informers"
|
||||||
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/framework"
|
||||||
|
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||||
|
"sigs.k8s.io/descheduler/pkg/utils"
|
||||||
|
"sigs.k8s.io/descheduler/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||||
|
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
||||||
|
|
||||||
|
nodeTaintKey := "hardware"
|
||||||
|
nodeTaintValue := "gpu"
|
||||||
|
|
||||||
|
nodeLabelKey := "datacenter"
|
||||||
|
nodeLabelValue := "east"
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
pods []*v1.Pod
|
||||||
|
nodes []*v1.Node
|
||||||
|
evictFailedBarePods bool
|
||||||
|
evictLocalStoragePods bool
|
||||||
|
evictSystemCriticalPods bool
|
||||||
|
priorityThreshold *int32
|
||||||
|
nodeFit bool
|
||||||
|
result bool
|
||||||
|
}
|
||||||
|
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "Pod with no tolerations running on normal node, all other nodes tainted",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.Spec.Taints = []v1.Taint{
|
||||||
|
{
|
||||||
|
Key: nodeTaintKey,
|
||||||
|
Value: nodeTaintValue,
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.Spec.Taints = []v1.Taint{
|
||||||
|
{
|
||||||
|
Key: nodeTaintKey,
|
||||||
|
Value: nodeTaintValue,
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
nodeFit: true,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Pod with correct tolerations running on normal node, all other nodes tainted",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.Tolerations = []v1.Toleration{
|
||||||
|
{
|
||||||
|
Key: nodeTaintKey,
|
||||||
|
Value: nodeTaintValue,
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.Spec.Taints = []v1.Taint{
|
||||||
|
{
|
||||||
|
Key: nodeTaintKey,
|
||||||
|
Value: nodeTaintValue,
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.Spec.Taints = []v1.Taint{
|
||||||
|
{
|
||||||
|
Key: nodeTaintKey,
|
||||||
|
Value: nodeTaintValue,
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
nodeFit: true,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod with incorrect node selector",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.NodeSelector = map[string]string{
|
||||||
|
nodeLabelKey: "fail",
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
nodeFit: true,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Pod with correct node selector",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.NodeSelector = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
nodeFit: true,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod with correct node selector, but only available node doesn't have enough CPU",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.NodeSelector = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("node2-TEST", 10, 16, 10, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestNode("node3-TEST", 10, 16, 10, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
nodeFit: true,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Pod with correct node selector, and one node has enough memory",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.NodeSelector = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestPod("node2-pod-10GB-mem", 20, 10, "node2", func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.Labels = map[string]string{
|
||||||
|
"test": "true",
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestPod("node3-pod-10GB-mem", 20, 10, "node3", func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.Labels = map[string]string{
|
||||||
|
"test": "true",
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestNode("node3", 100, 20, 10, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
nodeFit: true,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod with correct node selector, but both nodes don't have enough memory",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.NodeSelector = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestPod("node2-pod-10GB-mem", 10, 10, "node2", func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.Labels = map[string]string{
|
||||||
|
"test": "true",
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestPod("node3-pod-10GB-mem", 10, 10, "node3", func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.Labels = map[string]string{
|
||||||
|
"test": "true",
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestNode("node3", 100, 16, 10, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
nodeFit: true,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Pod with incorrect node selector, but nodefit false, should still be evicted",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.NodeSelector = map[string]string{
|
||||||
|
nodeLabelKey: "fail",
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.ObjectMeta.Labels = map[string]string{
|
||||||
|
nodeLabelKey: nodeLabelValue,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
nodeFit: false,
|
||||||
|
result: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range testCases {
|
||||||
|
t.Run(test.description, func(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
var objs []runtime.Object
|
||||||
|
for _, node := range test.nodes {
|
||||||
|
objs = append(objs, node)
|
||||||
|
}
|
||||||
|
for _, pod := range test.pods {
|
||||||
|
objs = append(objs, pod)
|
||||||
|
}
|
||||||
|
|
||||||
|
fakeClient := fake.NewSimpleClientset(objs...)
|
||||||
|
|
||||||
|
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||||
|
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||||
|
|
||||||
|
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sharedInformerFactory.Start(ctx.Done())
|
||||||
|
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||||
|
|
||||||
|
defaultEvictorArgs := &DefaultEvictorArgs{
|
||||||
|
EvictLocalStoragePods: test.evictLocalStoragePods,
|
||||||
|
EvictSystemCriticalPods: test.evictSystemCriticalPods,
|
||||||
|
IgnorePvcPods: false,
|
||||||
|
EvictFailedBarePods: test.evictFailedBarePods,
|
||||||
|
PriorityThreshold: &api.PriorityThreshold{
|
||||||
|
Value: test.priorityThreshold,
|
||||||
|
},
|
||||||
|
NodeFit: test.nodeFit,
|
||||||
|
}
|
||||||
|
|
||||||
|
evictorPlugin, err := New(
|
||||||
|
defaultEvictorArgs,
|
||||||
|
&frameworkfake.HandleImpl{
|
||||||
|
ClientsetImpl: fakeClient,
|
||||||
|
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||||
|
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
result := evictorPlugin.(framework.EvictorPlugin).PreEvictionFilter(test.pods[0])
|
||||||
|
if (result) != test.result {
|
||||||
|
t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDefaultEvictorFilter(t *testing.T) {
|
||||||
|
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
||||||
|
lowPriority := int32(800)
|
||||||
|
highPriority := int32(900)
|
||||||
|
|
||||||
|
nodeTaintKey := "hardware"
|
||||||
|
nodeTaintValue := "gpu"
|
||||||
|
|
||||||
|
type testCase struct {
|
||||||
|
description string
|
||||||
|
pods []*v1.Pod
|
||||||
|
nodes []*v1.Node
|
||||||
|
evictFailedBarePods bool
|
||||||
|
evictLocalStoragePods bool
|
||||||
|
evictSystemCriticalPods bool
|
||||||
|
priorityThreshold *int32
|
||||||
|
nodeFit bool
|
||||||
|
result bool
|
||||||
|
}
|
||||||
|
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
description: "Failed pod eviction with no ownerRefs",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("bare_pod_failed", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.Status.Phase = v1.PodFailed
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictFailedBarePods: false,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Normal pod eviction with no ownerRefs and evictFailedBarePods enabled",
|
||||||
|
pods: []*v1.Pod{test.BuildTestPod("bare_pod", 400, 0, n1.Name, nil)},
|
||||||
|
evictFailedBarePods: true,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Failed pod eviction with no ownerRefs",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("bare_pod_failed_but_can_be_evicted", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.Status.Phase = v1.PodFailed
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictFailedBarePods: true,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Normal pod eviction with normal ownerRefs",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Normal pod eviction with normal ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Normal pod eviction with replicaSet ownerRefs",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Normal pod eviction with replicaSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p4", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Normal pod eviction with statefulSet ownerRefs",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p18", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Normal pod eviction with statefulSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p19", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod not evicted because it is bound to a PV and evictLocalStoragePods = false",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p5", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.Volumes = []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "sample",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||||
|
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||||
|
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Pod is evicted because it is bound to a PV and evictLocalStoragePods = true",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p6", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.Volumes = []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "sample",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||||
|
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||||
|
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: true,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod is evicted because it is bound to a PV and evictLocalStoragePods = false, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p7", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.Volumes = []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "sample",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||||
|
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||||
|
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod not evicted becasuse it is part of a daemonSet",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Pod is evicted becasuse it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod not evicted becasuse it is a mirror poddsa",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p10", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Pod is evicted becasuse it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||||
|
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod not evicted becasuse it has system critical priority",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
priority := utils.SystemCriticalPriority
|
||||||
|
pod.Spec.Priority = &priority
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Pod is evicted becasuse it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p13", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
priority := utils.SystemCriticalPriority
|
||||||
|
pod.Spec.Priority = &priority
|
||||||
|
pod.Annotations = map[string]string{
|
||||||
|
"descheduler.alpha.kubernetes.io/evict": "true",
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod not evicted becasuse it has a priority higher than the configured priority threshold",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p14", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.Priority = &highPriority
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
priorityThreshold: &lowPriority,
|
||||||
|
result: false,
|
||||||
|
}, {
|
||||||
|
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.Spec.Priority = &highPriority
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
priorityThreshold: &lowPriority,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
priority := utils.SystemCriticalPriority
|
||||||
|
pod.Spec.Priority = &priority
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: true,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
priority := utils.SystemCriticalPriority
|
||||||
|
pod.Spec.Priority = &priority
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: true,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Spec.Priority = &highPriority
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: true,
|
||||||
|
priorityThreshold: &lowPriority,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||||
|
pod.Spec.Priority = &highPriority
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: true,
|
||||||
|
priorityThreshold: &lowPriority,
|
||||||
|
result: true,
|
||||||
|
}, {
|
||||||
|
description: "Pod with no tolerations running on normal node, all other nodes tainted, no PreEvictionFilter, should ignore nodeFit",
|
||||||
|
pods: []*v1.Pod{
|
||||||
|
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||||
|
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
nodes: []*v1.Node{
|
||||||
|
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.Spec.Taints = []v1.Taint{
|
||||||
|
{
|
||||||
|
Key: nodeTaintKey,
|
||||||
|
Value: nodeTaintValue,
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
|
||||||
|
node.Spec.Taints = []v1.Taint{
|
||||||
|
{
|
||||||
|
Key: nodeTaintKey,
|
||||||
|
Value: nodeTaintValue,
|
||||||
|
Effect: v1.TaintEffectNoSchedule,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
evictLocalStoragePods: false,
|
||||||
|
evictSystemCriticalPods: false,
|
||||||
|
nodeFit: true,
|
||||||
|
result: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range testCases {
|
||||||
|
t.Run(test.description, func(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
var objs []runtime.Object
|
||||||
|
for _, node := range test.nodes {
|
||||||
|
objs = append(objs, node)
|
||||||
|
}
|
||||||
|
for _, pod := range test.pods {
|
||||||
|
objs = append(objs, pod)
|
||||||
|
}
|
||||||
|
|
||||||
|
fakeClient := fake.NewSimpleClientset(objs...)
|
||||||
|
|
||||||
|
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||||
|
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||||
|
|
||||||
|
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sharedInformerFactory.Start(ctx.Done())
|
||||||
|
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||||
|
|
||||||
|
defaultEvictorArgs := &DefaultEvictorArgs{
|
||||||
|
EvictLocalStoragePods: test.evictLocalStoragePods,
|
||||||
|
EvictSystemCriticalPods: test.evictSystemCriticalPods,
|
||||||
|
IgnorePvcPods: false,
|
||||||
|
EvictFailedBarePods: test.evictFailedBarePods,
|
||||||
|
PriorityThreshold: &api.PriorityThreshold{
|
||||||
|
Value: test.priorityThreshold,
|
||||||
|
},
|
||||||
|
NodeFit: test.nodeFit,
|
||||||
|
}
|
||||||
|
|
||||||
|
evictorPlugin, err := New(
|
||||||
|
defaultEvictorArgs,
|
||||||
|
&frameworkfake.HandleImpl{
|
||||||
|
ClientsetImpl: fakeClient,
|
||||||
|
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||||
|
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
result := evictorPlugin.(framework.EvictorPlugin).Filter(test.pods[0])
|
||||||
|
if (result) != test.result {
|
||||||
|
t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
51
pkg/framework/plugins/defaultevictor/defaults.go
Normal file
@@ -0,0 +1,51 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package defaultevictor

import (
	"k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}

// SetDefaults_DefaultEvictorArgs
// TODO: the final default values would be discussed in community
func SetDefaults_DefaultEvictorArgs(obj *DefaultEvictorArgs) {
	if obj.NodeSelector == "" {
		obj.NodeSelector = ""
	}
	if !obj.EvictLocalStoragePods {
		obj.EvictLocalStoragePods = false
	}
	if !obj.EvictSystemCriticalPods {
		obj.EvictSystemCriticalPods = false
	}
	if !obj.IgnorePvcPods {
		obj.IgnorePvcPods = false
	}
	if !obj.EvictFailedBarePods {
		obj.EvictFailedBarePods = false
	}
	if obj.LabelSelector == nil {
		obj.LabelSelector = nil
	}
	if obj.PriorityThreshold == nil {
		obj.PriorityThreshold = nil
	}
	if !obj.NodeFit {
		obj.NodeFit = false
	}
}
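As a quick orientation (not part of the diff): the defaulters above are applied through the runtime scheme, roughly as sketched below. `AddToScheme` and `RegisterDefaults` come from this change; the wrapper function is illustrative only.

```go
// exampleDefaulting is an illustrative sketch (not part of the change) of how
// the defaulting functions above are expected to be invoked through the scheme.
func exampleDefaulting() *DefaultEvictorArgs {
	scheme := runtime.NewScheme()
	utilruntime.Must(AddToScheme(scheme)) // registers addDefaultingFuncs via localSchemeBuilder

	args := &DefaultEvictorArgs{}
	scheme.Default(args) // currently leaves zero values untouched, per the TODO above
	return args
}
```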
84
pkg/framework/plugins/defaultevictor/defaults_test.go
Normal file
@@ -0,0 +1,84 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package defaultevictor

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/descheduler/pkg/api"
)

func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
	tests := []struct {
		name string
		in   runtime.Object
		want runtime.Object
	}{
		{
			name: "DefaultEvictorArgs empty",
			in:   &DefaultEvictorArgs{},
			want: &DefaultEvictorArgs{
				NodeSelector:            "",
				EvictLocalStoragePods:   false,
				EvictSystemCriticalPods: false,
				IgnorePvcPods:           false,
				EvictFailedBarePods:     false,
				LabelSelector:           nil,
				PriorityThreshold:       nil,
				NodeFit:                 false,
			},
		},
		{
			name: "DefaultEvictorArgs with value",
			in: &DefaultEvictorArgs{
				NodeSelector:            "NodeSelector",
				EvictLocalStoragePods:   true,
				EvictSystemCriticalPods: true,
				IgnorePvcPods:           true,
				EvictFailedBarePods:     true,
				LabelSelector:           nil,
				PriorityThreshold: &api.PriorityThreshold{
					Value: pointer.Int32(800),
				},
				NodeFit: true,
			},
			want: &DefaultEvictorArgs{
				NodeSelector:            "NodeSelector",
				EvictLocalStoragePods:   true,
				EvictSystemCriticalPods: true,
				IgnorePvcPods:           true,
				EvictFailedBarePods:     true,
				LabelSelector:           nil,
				PriorityThreshold: &api.PriorityThreshold{
					Value: pointer.Int32(800),
				},
				NodeFit: true,
			},
		},
	}
	for _, tc := range tests {
		scheme := runtime.NewScheme()
		utilruntime.Must(AddToScheme(scheme))
		t.Run(tc.name, func(t *testing.T) {
			scheme.Default(tc.in)
			if diff := cmp.Diff(tc.in, tc.want); diff != "" {
				t.Errorf("Got unexpected defaults (-want, +got):\n%s", diff)
			}
		})
	}
}
16
pkg/framework/plugins/defaultevictor/doc.go
Normal file
@@ -0,0 +1,16 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:defaulter-gen=TypeMeta

package defaultevictor
@@ -1,12 +1,9 @@
|
|||||||
/*
|
/*
|
||||||
Copyright 2018 The Kubernetes Authors.
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
You may obtain a copy of the License at
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
Unless required by applicable law or agreed to in writing, software
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
@@ -14,18 +11,21 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package v1alpha1
|
package defaultevictor
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
|
SchemeBuilder = runtime.NewSchemeBuilder()
|
||||||
SchemeBuilder runtime.SchemeBuilder
|
|
||||||
// localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
|
|
||||||
// defaulting and conversion init funcs are registered as well.
|
|
||||||
localSchemeBuilder = &SchemeBuilder
|
localSchemeBuilder = &SchemeBuilder
|
||||||
// AddToScheme is a global function that registers this API group & version to a scheme
|
AddToScheme = localSchemeBuilder.AddToScheme
|
||||||
AddToScheme = localSchemeBuilder.AddToScheme
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// We only register manually written functions here. The registration of the
|
||||||
|
// generated functions takes place in the generated files. The separation
|
||||||
|
// makes the code compile even when the generated files are missing.
|
||||||
|
localSchemeBuilder.Register(addDefaultingFuncs)
|
||||||
|
}
|
||||||
36
pkg/framework/plugins/defaultevictor/types.go
Normal file
@@ -0,0 +1,36 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package defaultevictor

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/descheduler/pkg/api"
)

// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DefaultEvictorArgs holds arguments used to configure DefaultEvictor plugin.
type DefaultEvictorArgs struct {
	metav1.TypeMeta `json:",inline"`

	NodeSelector            string                 `json:"nodeSelector"`
	EvictLocalStoragePods   bool                   `json:"evictLocalStoragePods"`
	EvictSystemCriticalPods bool                   `json:"evictSystemCriticalPods"`
	IgnorePvcPods           bool                   `json:"ignorePvcPods"`
	EvictFailedBarePods     bool                   `json:"evictFailedBarePods"`
	LabelSelector           *metav1.LabelSelector  `json:"labelSelector"`
	PriorityThreshold       *api.PriorityThreshold `json:"priorityThreshold"`
	NodeFit                 bool                   `json:"nodeFit"`
}
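For illustration only (not part of the change): populating these arguments in Go, using the same helpers the tests in this change rely on. The concrete values are invented; the field names and `api.PriorityThreshold` / `pointer.Int32` usage come from the code above.

```go
// exampleArgs is an illustrative sketch; field values are hypothetical.
func exampleArgs() *DefaultEvictorArgs {
	return &DefaultEvictorArgs{
		EvictLocalStoragePods:   false,
		EvictSystemCriticalPods: false,
		EvictFailedBarePods:     true,
		IgnorePvcPods:           false,
		NodeFit:                 true,
		PriorityThreshold: &api.PriorityThreshold{
			Value: pointer.Int32(1000),
		},
	}
}
```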
@@ -0,0 +1,63 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package defaultevictor

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	api "sigs.k8s.io/descheduler/pkg/api"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DefaultEvictorArgs) DeepCopyInto(out *DefaultEvictorArgs) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.LabelSelector != nil {
		in, out := &in.LabelSelector, &out.LabelSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.PriorityThreshold != nil {
		in, out := &in.PriorityThreshold, &out.PriorityThreshold
		*out = new(api.PriorityThreshold)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultEvictorArgs.
func (in *DefaultEvictorArgs) DeepCopy() *DefaultEvictorArgs {
	if in == nil {
		return nil
	}
	out := new(DefaultEvictorArgs)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DefaultEvictorArgs) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
@@ -0,0 +1,38 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by defaulter-gen. DO NOT EDIT.

package defaultevictor

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
	scheme.AddTypeDefaultingFunc(&DefaultEvictorArgs{}, func(obj interface{}) { SetObjectDefaults_DefaultEvictorArgs(obj.(*DefaultEvictorArgs)) })
	return nil
}

func SetObjectDefaults_DefaultEvictorArgs(in *DefaultEvictorArgs) {
	SetDefaults_DefaultEvictorArgs(in)
}
50
pkg/framework/plugins/nodeutilization/defaults.go
Normal file
@@ -0,0 +1,50 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodeutilization

import (
	"k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}

// SetDefaults_LowNodeUtilizationArgs
// TODO: the final default values would be discussed in community
func SetDefaults_LowNodeUtilizationArgs(obj *LowNodeUtilizationArgs) {
	if !obj.UseDeviationThresholds {
		obj.UseDeviationThresholds = false
	}
	if obj.Thresholds == nil {
		obj.Thresholds = nil
	}
	if obj.TargetThresholds == nil {
		obj.TargetThresholds = nil
	}
	if obj.NumberOfNodes == 0 {
		obj.NumberOfNodes = 0
	}
}

// SetDefaults_HighNodeUtilizationArgs
// TODO: the final default values would be discussed in community
func SetDefaults_HighNodeUtilizationArgs(obj *HighNodeUtilizationArgs) {
	if obj.Thresholds == nil {
		obj.Thresholds = nil
	}
	if obj.NumberOfNodes == 0 {
		obj.NumberOfNodes = 0
	}
}
124
pkg/framework/plugins/nodeutilization/defaults_test.go
Normal file
@@ -0,0 +1,124 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodeutilization

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"sigs.k8s.io/descheduler/pkg/api"
)

func TestSetDefaults_LowNodeUtilizationArgs(t *testing.T) {
	tests := []struct {
		name string
		in   runtime.Object
		want runtime.Object
	}{
		{
			name: "LowNodeUtilizationArgs empty",
			in:   &LowNodeUtilizationArgs{},
			want: &LowNodeUtilizationArgs{
				UseDeviationThresholds: false,
				Thresholds:             nil,
				TargetThresholds:       nil,
				NumberOfNodes:          0,
			},
		},
		{
			name: "LowNodeUtilizationArgs with value",
			in: &LowNodeUtilizationArgs{
				UseDeviationThresholds: true,
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU:    20,
					v1.ResourceMemory: 120,
				},
				TargetThresholds: api.ResourceThresholds{
					v1.ResourceCPU:    80,
					v1.ResourceMemory: 80,
				},
				NumberOfNodes: 10,
			},
			want: &LowNodeUtilizationArgs{
				UseDeviationThresholds: true,
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU:    20,
					v1.ResourceMemory: 120,
				},
				TargetThresholds: api.ResourceThresholds{
					v1.ResourceCPU:    80,
					v1.ResourceMemory: 80,
				},
				NumberOfNodes: 10,
			},
		},
	}
	for _, tc := range tests {
		scheme := runtime.NewScheme()
		utilruntime.Must(AddToScheme(scheme))
		t.Run(tc.name, func(t *testing.T) {
			scheme.Default(tc.in)
			if diff := cmp.Diff(tc.in, tc.want); diff != "" {
				t.Errorf("Got unexpected defaults (-want, +got):\n%s", diff)
			}
		})
	}
}

func TestSetDefaults_HighNodeUtilizationArgs(t *testing.T) {
	tests := []struct {
		name string
		in   runtime.Object
		want runtime.Object
	}{
		{
			name: "HighNodeUtilizationArgs empty",
			in:   &HighNodeUtilizationArgs{},
			want: &HighNodeUtilizationArgs{
				Thresholds:    nil,
				NumberOfNodes: 0,
			},
		},
		{
			name: "HighNodeUtilizationArgs with value",
			in: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU:    20,
					v1.ResourceMemory: 120,
				},
				NumberOfNodes: 10,
			},
			want: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU:    20,
					v1.ResourceMemory: 120,
				},
				NumberOfNodes: 10,
			},
		},
	}
	for _, tc := range tests {
		scheme := runtime.NewScheme()
		utilruntime.Must(AddToScheme(scheme))
		t.Run(tc.name, func(t *testing.T) {
			scheme.Default(tc.in)
			if diff := cmp.Diff(tc.in, tc.want); diff != "" {
				t.Errorf("Got unexpected defaults (-want, +got):\n%s", diff)
			}
		})
	}
}
16
pkg/framework/plugins/nodeutilization/doc.go
Normal file
@@ -0,0 +1,16 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:defaulter-gen=TypeMeta

package nodeutilization
@@ -1,11 +1,11 @@
|
|||||||
/*
|
/*
|
||||||
Copyright 2021 The Kubernetes Authors.
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
You may obtain a copy of the License at
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
Unless required by applicable law or agreed to in writing, software
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
@@ -22,49 +22,65 @@ import (
|
|||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
"sigs.k8s.io/descheduler/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
|
||||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||||
|
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
"sigs.k8s.io/descheduler/pkg/utils"
|
"sigs.k8s.io/descheduler/pkg/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
-// HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its strategy.
+const HighNodeUtilizationPluginName = "HighNodeUtilization"

+// HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its plugin.
 // Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
-func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
-	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
-		klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
-		return
+type HighNodeUtilization struct {
+	handle framework.Handle
+	args *HighNodeUtilizationArgs
+	podFilter func(pod *v1.Pod) bool
+}

+var _ framework.BalancePlugin = &HighNodeUtilization{}

+// NewHighNodeUtilization builds plugin from its arguments while passing a handle
+func NewHighNodeUtilization(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+	highNodeUtilizatioArgs, ok := args.(*HighNodeUtilizationArgs)
+	if !ok {
+		return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
 	}

-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
+	podFilter, err := podutil.NewOptions().
+		WithFilter(handle.Evictor().Filter).
+		BuildFilterFunc()

-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
 	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
+		return nil, fmt.Errorf("error initializing pod filter function: %v", err)
 	}

-	thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
-	targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
-	if err := validateHighUtilizationStrategyConfig(thresholds, targetThresholds); err != nil {
-		klog.ErrorS(err, "HighNodeUtilization config is not valid")
-		return
+	return &HighNodeUtilization{
+		handle: handle,
+		args: highNodeUtilizatioArgs,
+		podFilter: podFilter,
+	}, nil
 	}
-	targetThresholds = make(api.ResourceThresholds)
+// Name retrieves the plugin name
+func (h *HighNodeUtilization) Name() string {
+	return HighNodeUtilizationPluginName
+}

+// Balance extension point implementation for the plugin
+func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
+	thresholds := h.args.Thresholds
+	targetThresholds := make(api.ResourceThresholds)

 	setDefaultForThresholds(thresholds, targetThresholds)
 	resourceNames := getResourceNames(targetThresholds)

 	sourceNodes, highNodes := classifyNodes(
-		getNodeUsage(nodes, resourceNames, getPodsAssignedToNode),
-		getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode, false),
+		getNodeUsage(nodes, resourceNames, h.handle.GetPodsAssignedToNodeFunc()),
+		getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, h.handle.GetPodsAssignedToNodeFunc(), false),
 		func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
 			return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
 		},
@@ -93,23 +109,21 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate

 	if len(sourceNodes) == 0 {
 		klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
-		return
+		return nil
 	}
-	if len(sourceNodes) <= strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
-		klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
-		return
+	if len(sourceNodes) <= h.args.NumberOfNodes {
+		klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", h.args.NumberOfNodes)
+		return nil
 	}
 	if len(sourceNodes) == len(nodes) {
 		klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
-		return
+		return nil
 	}
 	if len(highNodes) == 0 {
 		klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
-		return
+		return nil
 	}

-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

 	// stop if the total available usage has dropped to zero - no more pods can be scheduled
 	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
 		for name := range totalAvailableUsage {
@@ -126,23 +140,14 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate

 	evictPodsFromSourceNodes(
 		ctx,
+		h.args.EvictableNamespaces,
 		sourceNodes,
 		highNodes,
-		podEvictor,
-		evictable.IsEvictable,
+		h.handle.Evictor(),
+		h.podFilter,
 		resourceNames,
-		"HighNodeUtilization",
 		continueEvictionCond)

-}

-func validateHighUtilizationStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error {
-	if targetThresholds != nil {
-		return fmt.Errorf("targetThresholds is not applicable for HighNodeUtilization")
-	}
-	if err := validateThresholds(thresholds); err != nil {
-		return fmt.Errorf("thresholds config is not valid: %v", err)
-	}
 	return nil
 }

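The hunks above replace the HighNodeUtilization strategy function with a BalancePlugin that is built from typed arguments and a framework handle. The following is a minimal sketch of how the migrated plugin could be wired up and invoked; only NewHighNodeUtilization, HighNodeUtilizationArgs, framework.BalancePlugin and the Balance extension point are taken from the diff, while the wrapper function name, the threshold values, and the assumption that a framework.Handle (for example the frameworkfake.HandleImpl used in the tests below) is already available are illustrative.

// Illustrative sketch only, not part of the diff: construct the plugin from
// typed args plus a framework.Handle, then run its Balance extension point.
func runHighNodeUtilization(ctx context.Context, handle framework.Handle, nodes []*v1.Node) error {
	plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
		Thresholds: api.ResourceThresholds{ // example percentages, not prescribed values
			v1.ResourceCPU: 20,
			v1.ResourceMemory: 20,
		},
	}, handle)
	if err != nil {
		return err
	}
	// Balance classifies nodes and evicts from underutilized ones through the handle's evictor.
	plugin.(framework.BalancePlugin).Balance(ctx, nodes)
	return nil
}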
@@ -22,16 +22,20 @@ import (
 	"testing"

 	v1 "k8s.io/api/core/v1"
-	"k8s.io/api/policy/v1beta1"
+	policy "k8s.io/api/policy/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
+	"k8s.io/client-go/tools/events"

 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/framework"
+	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
 	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/test"
 )
@@ -101,7 +105,8 @@ func TestHighNodeUtilization(t *testing.T) {
 			VolumeSource: v1.VolumeSource{
 				HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 				EmptyDir: &v1.EmptyDirVolumeSource{
-					SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+					SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+				},
 			},
 		},
 	}
@@ -315,7 +320,7 @@ func TestHighNodeUtilization(t *testing.T) {
 			test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
 		},
 		pods: []*v1.Pod{
-			//These won't be evicted
+			// These won't be evicted
 			test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
 				test.SetRSOwnerRef(pod)
 				test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
@@ -446,7 +451,7 @@ func TestHighNodeUtilization(t *testing.T) {
 		fakeClient := fake.NewSimpleClientset(objs...)

 		sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-		podInformer := sharedInformerFactory.Core().V1().Pods()
+		podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

 		getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
 		if err != nil {
@@ -463,7 +468,7 @@ func TestHighNodeUtilization(t *testing.T) {
 		fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
 			getAction := action.(core.CreateAction)
 			obj := getAction.GetObject()
-			if eviction, ok := obj.(*v1beta1.Eviction); ok {
+			if eviction, ok := obj.(*policy.Eviction); ok {
 				if _, exists := podsForEviction[eviction.Name]; exists {
 					return true, obj, nil
 				}
@@ -477,28 +482,7 @@ func TestHighNodeUtilization(t *testing.T) {
 		sharedInformerFactory.Start(ctx.Done())
 		sharedInformerFactory.WaitForCacheSync(ctx.Done())

-		//fakeClient := &fake.Clientset{}
-		//fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-		//	list := action.(core.ListAction)
-		//	fieldString := list.GetListRestrictions().Fields.String()
-		//	if strings.Contains(fieldString, n1NodeName) {
-		//		return true, test.pods[n1NodeName], nil
-		//	}
-		//	if strings.Contains(fieldString, n2NodeName) {
-		//		return true, test.pods[n2NodeName], nil
-		//	}
-		//	if strings.Contains(fieldString, n3NodeName) {
-		//		return true, test.pods[n3NodeName], nil
-		//	}
-		//	return true, nil, fmt.Errorf("Failed to list: %v", list)
-		//})
-		//fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
-		//	getAction := action.(core.GetAction)
-		//	if node, exists := testCase.nodes[getAction.GetName()]; exists {
-		//		return true, node, nil
-		//	}
-		//	return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
-		//})
+		eventRecorder := &events.FakeRecorder{}

 		podEvictor := evictions.NewPodEvictor(
 			fakeClient,
@@ -507,24 +491,46 @@ func TestHighNodeUtilization(t *testing.T) {
 			nil,
 			nil,
 			testCase.nodes,
-			getPodsAssignedToNode,
-			false,
-			false,
-			false,
-			false,
 			false,
+			eventRecorder,
 		)

-		strategy := api.DeschedulerStrategy{
-			Enabled: true,
-			Params: &api.StrategyParameters{
-				NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
-					Thresholds: testCase.thresholds,
-				},
-				NodeFit: true,
-			},
+		defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
+			EvictLocalStoragePods: false,
+			EvictSystemCriticalPods: false,
+			IgnorePvcPods: false,
+			EvictFailedBarePods: false,
+			NodeFit: true,
 		}
-		HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
+		evictorFilter, err := defaultevictor.New(
+			defaultevictorArgs,
+			&frameworkfake.HandleImpl{
+				ClientsetImpl: fakeClient,
+				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+				SharedInformerFactoryImpl: sharedInformerFactory,
+			},
+		)
+		if err != nil {
+			t.Fatalf("Unable to initialize the plugin: %v", err)
+		}

+		handle := &frameworkfake.HandleImpl{
+			ClientsetImpl: fakeClient,
+			GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+			PodEvictorImpl: podEvictor,
+			EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
+			SharedInformerFactoryImpl: sharedInformerFactory,
+		}

+		plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
+			Thresholds: testCase.thresholds,
+		},
+			handle)
+		if err != nil {
+			t.Fatalf("Unable to initialize the plugin: %v", err)
+		}
+		plugin.(framework.BalancePlugin).Balance(ctx, testCase.nodes)

 		podsEvicted := podEvictor.TotalEvicted()
 		if testCase.expectedPodsEvicted != podsEvicted {
@@ -537,85 +543,7 @@ func TestHighNodeUtilization(t *testing.T) {
 	}
 }

-func TestValidateHighNodeUtilizationStrategyConfig(t *testing.T) {
-	tests := []struct {
-		name string
-		thresholds api.ResourceThresholds
-		targetThresholds api.ResourceThresholds
-		errInfo error
-	}{
-		{
-			name: "passing target thresholds",
-			thresholds: api.ResourceThresholds{
-				v1.ResourceCPU: 20,
-				v1.ResourceMemory: 20,
-			},
-			targetThresholds: api.ResourceThresholds{
-				v1.ResourceCPU: 80,
-				v1.ResourceMemory: 80,
-			},
-			errInfo: fmt.Errorf("targetThresholds is not applicable for HighNodeUtilization"),
-		},
-		{
-			name: "passing empty thresholds",
-			thresholds: api.ResourceThresholds{},
-			errInfo: fmt.Errorf("thresholds config is not valid: no resource threshold is configured"),
-		},
-		{
-			name: "passing invalid thresholds",
-			thresholds: api.ResourceThresholds{
-				v1.ResourceCPU: 80,
-				v1.ResourceMemory: 120,
-			},
-			errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
-				"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
-		},
-		{
-			name: "passing valid strategy config",
-			thresholds: api.ResourceThresholds{
-				v1.ResourceCPU: 80,
-				v1.ResourceMemory: 80,
-			},
-			errInfo: nil,
-		},
-		{
-			name: "passing valid strategy config with extended resource",
-			thresholds: api.ResourceThresholds{
-				v1.ResourceCPU: 80,
-				v1.ResourceMemory: 80,
-				extendedResource: 80,
-			},
-			errInfo: nil,
-		},
-	}

-	for _, testCase := range tests {
-		validateErr := validateHighUtilizationStrategyConfig(testCase.thresholds, testCase.targetThresholds)

-		if validateErr == nil || testCase.errInfo == nil {
-			if validateErr != testCase.errInfo {
-				t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
-					testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
-			}
-		} else if validateErr.Error() != testCase.errInfo.Error() {
-			t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
-				testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
-		}
-	}
-}

 func TestHighNodeUtilizationWithTaints(t *testing.T) {
-	strategy := api.DeschedulerStrategy{
-		Enabled: true,
-		Params: &api.StrategyParameters{
-			NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
-				Thresholds: api.ResourceThresholds{
-					v1.ResourceCPU: 40,
-				},
-			},
-		},
-	}

 	n1 := test.BuildTestNode("n1", 1000, 3000, 10, nil)
 	n2 := test.BuildTestNode("n2", 1000, 3000, 10, nil)
 	n3 := test.BuildTestNode("n3", 1000, 3000, 10, nil)
@@ -646,7 +574,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 		name: "No taints",
 		nodes: []*v1.Node{n1, n2, n3},
 		pods: []*v1.Pod{
-			//Node 1 pods
+			// Node 1 pods
 			test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
 			test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
 			test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
@@ -659,7 +587,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 		name: "No pod tolerates node taint",
 		nodes: []*v1.Node{n1, n3withTaints},
 		pods: []*v1.Pod{
-			//Node 1 pods
+			// Node 1 pods
 			test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
 			// Node 3 pods
 			test.BuildTestPod(fmt.Sprintf("pod_2_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
@@ -670,7 +598,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 		name: "Pod which tolerates node taint",
 		nodes: []*v1.Node{n1, n3withTaints},
 		pods: []*v1.Pod{
-			//Node 1 pods
+			// Node 1 pods
 			test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 100, 0, n1.Name, test.SetRSOwnerRef),
 			podThatToleratesTaint,
 			// Node 3 pods
@@ -696,7 +624,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {

 		fakeClient := fake.NewSimpleClientset(objs...)
 		sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-		podInformer := sharedInformerFactory.Core().V1().Pods()
+		podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

 		getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
 		if err != nil {
@@ -706,6 +634,8 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 		sharedInformerFactory.Start(ctx.Done())
 		sharedInformerFactory.WaitForCacheSync(ctx.Done())

+		eventRecorder := &events.FakeRecorder{}

 		podEvictor := evictions.NewPodEvictor(
 			fakeClient,
 			"policy/v1",
@@ -713,15 +643,47 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 			&item.evictionsExpected,
 			nil,
 			item.nodes,
-			getPodsAssignedToNode,
-			false,
-			false,
-			false,
-			false,
 			false,
+			eventRecorder,
 		)

-		HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)
+		defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
+			EvictLocalStoragePods: false,
+			EvictSystemCriticalPods: false,
+			IgnorePvcPods: false,
+			EvictFailedBarePods: false,
+		}

+		evictorFilter, err := defaultevictor.New(
+			defaultevictorArgs,
+			&frameworkfake.HandleImpl{
+				ClientsetImpl: fakeClient,
+				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+				SharedInformerFactoryImpl: sharedInformerFactory,
+			},
+		)
+		if err != nil {
+			t.Fatalf("Unable to initialize the plugin: %v", err)
+		}

+		handle := &frameworkfake.HandleImpl{
+			ClientsetImpl: fakeClient,
+			GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+			PodEvictorImpl: podEvictor,
+			EvictorFilterImpl: evictorFilter.(framework.EvictorPlugin),
+			SharedInformerFactoryImpl: sharedInformerFactory,
+		}

+		plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
+			Thresholds: api.ResourceThresholds{
+				v1.ResourceCPU: 40,
+			},
+		},
+			handle)
+		if err != nil {
+			t.Fatalf("Unable to initialize the plugin: %v", err)
+		}
+		plugin.(framework.BalancePlugin).Balance(ctx, item.nodes)

 		if item.evictionsExpected != podEvictor.TotalEvicted() {
 			t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -22,42 +22,58 @@ import (

 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/klog/v2"

-	"sigs.k8s.io/descheduler/pkg/api"
-	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
+	"sigs.k8s.io/descheduler/pkg/framework"
 )

+const LowNodeUtilizationPluginName = "LowNodeUtilization"

 // LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
 // to calculate nodes' utilization and not the actual resource usage.
-func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
-	// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
-	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
-		klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
-		return
+type LowNodeUtilization struct {
+	handle framework.Handle
+	args *LowNodeUtilizationArgs
+	podFilter func(pod *v1.Pod) bool
 }
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
+var _ framework.BalancePlugin = &LowNodeUtilization{}
+// NewLowNodeUtilization builds plugin from its arguments while passing a handle
+func NewLowNodeUtilization(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+	lowNodeUtilizationArgsArgs, ok := args.(*LowNodeUtilizationArgs)
+	if !ok {
+		return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
 	}

-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-	useDeviationThresholds := strategy.Params.NodeResourceUtilizationThresholds.UseDeviationThresholds
-	thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
-	targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
-	if err := validateLowUtilizationStrategyConfig(thresholds, targetThresholds, useDeviationThresholds); err != nil {
-		klog.ErrorS(err, "LowNodeUtilization config is not valid")
-		return
+	podFilter, err := podutil.NewOptions().
+		WithFilter(handle.Evictor().Filter).
+		BuildFilterFunc()
+	if err != nil {
+		return nil, fmt.Errorf("error initializing pod filter function: %v", err)
 	}

+	return &LowNodeUtilization{
+		handle: handle,
+		args: lowNodeUtilizationArgsArgs,
+		podFilter: podFilter,
+	}, nil
+}

+// Name retrieves the plugin name
+func (l *LowNodeUtilization) Name() string {
+	return LowNodeUtilizationPluginName
+}

+// Balance extension point implementation for the plugin
+func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *framework.Status {
+	useDeviationThresholds := l.args.UseDeviationThresholds
+	thresholds := l.args.Thresholds
+	targetThresholds := l.args.TargetThresholds

 	// check if Pods/CPU/Mem are set, if not, set them to 100
 	if _, ok := thresholds[v1.ResourcePods]; !ok {
 		if useDeviationThresholds {
@@ -89,8 +105,8 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 	resourceNames := getResourceNames(thresholds)

 	lowNodes, sourceNodes := classifyNodes(
-		getNodeUsage(nodes, resourceNames, getPodsAssignedToNode),
-		getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode, useDeviationThresholds),
+		getNodeUsage(nodes, resourceNames, l.handle.GetPodsAssignedToNodeFunc()),
+		getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, l.handle.GetPodsAssignedToNodeFunc(), useDeviationThresholds),
 		// The node has to be schedulable (to be able to move workload there)
 		func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
 			if nodeutil.IsNodeUnschedulable(node) {
@@ -104,56 +120,54 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 		},
 	)

-	// log message in one line
-	keysAndValues := []interface{}{
+	// log message for nodes with low utilization
+	underutilizationCriteria := []interface{}{
 		"CPU", thresholds[v1.ResourceCPU],
 		"Mem", thresholds[v1.ResourceMemory],
 		"Pods", thresholds[v1.ResourcePods],
 	}
 	for name := range thresholds {
 		if !nodeutil.IsBasicResource(name) {
-			keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
+			underutilizationCriteria = append(underutilizationCriteria, string(name), int64(thresholds[name]))
 		}
 	}
-	klog.V(1).InfoS("Criteria for a node under utilization", keysAndValues...)
+	klog.V(1).InfoS("Criteria for a node under utilization", underutilizationCriteria...)
 	klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))

-	// log message in one line
-	keysAndValues = []interface{}{
+	// log message for over utilized nodes
+	overutilizationCriteria := []interface{}{
 		"CPU", targetThresholds[v1.ResourceCPU],
 		"Mem", targetThresholds[v1.ResourceMemory],
 		"Pods", targetThresholds[v1.ResourcePods],
 	}
 	for name := range targetThresholds {
 		if !nodeutil.IsBasicResource(name) {
-			keysAndValues = append(keysAndValues, string(name), int64(targetThresholds[name]))
+			overutilizationCriteria = append(overutilizationCriteria, string(name), int64(targetThresholds[name]))
 		}
 	}
-	klog.V(1).InfoS("Criteria for a node above target utilization", keysAndValues...)
+	klog.V(1).InfoS("Criteria for a node above target utilization", overutilizationCriteria...)
 	klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(sourceNodes))

 	if len(lowNodes) == 0 {
 		klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
-		return
+		return nil
 	}

-	if len(lowNodes) <= strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
-		klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
-		return
+	if len(lowNodes) <= l.args.NumberOfNodes {
+		klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", l.args.NumberOfNodes)
+		return nil
 	}

 	if len(lowNodes) == len(nodes) {
 		klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
-		return
+		return nil
 	}

 	if len(sourceNodes) == 0 {
 		klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
-		return
+		return nil
 	}

-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

 	// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
 	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
 		if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
@@ -173,37 +187,13 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg

 	evictPodsFromSourceNodes(
 		ctx,
+		l.args.EvictableNamespaces,
 		sourceNodes,
 		lowNodes,
-		podEvictor,
-		evictable.IsEvictable,
+		l.handle.Evictor(),
+		l.podFilter,
 		resourceNames,
-		"LowNodeUtilization",
 		continueEvictionCond)

-	klog.V(1).InfoS("Total number of pods evicted", "evictedPods", podEvictor.TotalEvicted())
-}

-// validateLowUtilizationStrategyConfig checks if the strategy's config is valid
-func validateLowUtilizationStrategyConfig(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) error {
-	// validate thresholds and targetThresholds config
-	if err := validateThresholds(thresholds); err != nil {
-		return fmt.Errorf("thresholds config is not valid: %v", err)
-	}
-	if err := validateThresholds(targetThresholds); err != nil {
-		return fmt.Errorf("targetThresholds config is not valid: %v", err)
-	}

-	// validate if thresholds and targetThresholds have same resources configured
-	if len(thresholds) != len(targetThresholds) {
-		return fmt.Errorf("thresholds and targetThresholds configured different resources")
-	}
-	for resourceName, value := range thresholds {
-		if targetValue, ok := targetThresholds[resourceName]; !ok {
-			return fmt.Errorf("thresholds and targetThresholds configured different resources")
-		} else if value > targetValue && !useDeviationThresholds {
-			return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
-		}
-	}
 	return nil
 }
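As with HighNodeUtilization, the LowNodeUtilization strategy function becomes a BalancePlugin whose Balance extension point reads Thresholds, TargetThresholds, UseDeviationThresholds, NumberOfNodes and EvictableNamespaces from its typed arguments instead of strategy.Params. A minimal sketch under the same assumptions follows; the wrapper name, the percentage values, and the already-constructed handle are illustrative, while NewLowNodeUtilization, LowNodeUtilizationArgs and Balance are taken from the diff.

// Illustrative sketch only, not part of the diff: under- and over-utilization
// criteria now travel in LowNodeUtilizationArgs rather than api.StrategyParameters.
func runLowNodeUtilization(ctx context.Context, handle framework.Handle, nodes []*v1.Node) error {
	plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
		Thresholds: api.ResourceThresholds{ // nodes below these percentages count as underutilized
			v1.ResourceCPU: 20,
			v1.ResourceMemory: 20,
			v1.ResourcePods: 20,
		},
		TargetThresholds: api.ResourceThresholds{ // nodes above these become eviction sources
			v1.ResourceCPU: 50,
			v1.ResourceMemory: 50,
			v1.ResourcePods: 50,
		},
	}, handle)
	if err != nil {
		return err
	}
	plugin.(framework.BalancePlugin).Balance(ctx, nodes)
	return nil
}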
Some files were not shown because too many files have changed in this diff.