Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)

Compare commits: v0.29.0...deschedule (277 commits)
.github/workflows/helm.yaml (vendored) — 26 changed lines

@@ -20,27 +20,35 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0

       - name: Set up Helm
-        uses: azure/setup-helm@v2.1
+        uses: azure/setup-helm@v4.2.0
         with:
-          version: v3.9.2
+          version: v3.15.1

-      - uses: actions/setup-python@v3.1.2
+      - uses: actions/setup-python@v5.1.1
         with:
-          python-version: 3.7
+          python-version: 3.12

-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v5
         with:
-          go-version: '1.21.5'
+          go-version-file: 'go.mod'

       - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.2.1
+        uses: helm/chart-testing-action@v2.6.1
         with:
-          version: v3.7.0
+          version: v3.11.0

+      - name: Install Helm Unit Test Plugin
+        run: |
+          helm plugin install https://github.com/helm-unittest/helm-unittest
+
+      - name: Run Helm Unit Tests
+        run: |
+          helm unittest charts/descheduler --strict -d
+
       - name: Run chart-testing (list-changed)
         id: list-changed
.github/workflows/manifests.yaml (vendored) — 10 changed lines

@@ -7,16 +7,16 @@ jobs:
   deploy:
     strategy:
       matrix:
-        k8s-version: ["v1.29.0"]
-        descheduler-version: ["v0.29.0"]
-        descheduler-api: ["v1alpha1", "v1alpha2"]
+        k8s-version: ["v1.32.0"]
+        descheduler-version: ["v0.32.2"]
+        descheduler-api: ["v1alpha2"]
         manifest: ["deployment"]
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      - name: Create kind cluster
-        uses: helm/kind-action@v1.5.0
+        uses: helm/kind-action@v1.12.0
         with:
           node_image: kindest/node:${{ matrix.k8s-version }}
           kubectl_version: ${{ matrix.k8s-version }}
.github/workflows/release.yaml (vendored) — 9 changed lines

@@ -5,6 +5,9 @@ on:
     branches:
       - release-*

+permissions:
+  contents: write # allow actions to update gh-pages branch
+
 jobs:
   release:
     runs-on: ubuntu-latest
@@ -20,12 +23,12 @@ jobs:
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

       - name: Install Helm
-        uses: azure/setup-helm@v1
+        uses: azure/setup-helm@v4.2.0
         with:
-          version: v3.7.0
+          version: v3.15.1

       - name: Run chart-releaser
-        uses: helm/chart-releaser-action@v1.1.0
+        uses: helm/chart-releaser-action@v1.6.0
         env:
           CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
           CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"
.github/workflows/security.yaml (vendored) — 2 changed lines

@@ -22,7 +22,7 @@ jobs:
       fail-fast: false
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
[file header lost in the mirror; the content is the golangci-lint configuration]

@@ -1,5 +1,5 @@
 run:
-  timeout: 2m
+  timeout: 5m

 linters:
   disable-all: true
[file header lost in the mirror; the content is the Dockerfile]

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM golang:1.21.5
+FROM golang:1.23.3

 WORKDIR /go/src/sigs.k8s.io/descheduler
 COPY . .
@@ -23,6 +23,8 @@ FROM scratch

 MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>

+LABEL org.opencontainers.image.source https://github.com/kubernetes-sigs/descheduler
+
 USER 1000

 COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler
Makefile — 10 changed lines

@@ -26,12 +26,14 @@ ARCHS = amd64 arm arm64

 LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"

-GOLANGCI_VERSION := v1.55.2
+GOLANGCI_VERSION := v1.62.2
 HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)

-GOFUMPT_VERSION := v0.4.0
+GOFUMPT_VERSION := v0.7.0
 HAS_GOFUMPT := $(shell command -v gofumpt 2> /dev/null)

+GO_VERSION := $(shell (command -v jq > /dev/null && (go mod edit -json | jq -r .Go)) || (sed -En 's/^go (.*)$$/\1/p' go.mod))
+
 # REGISTRY is the container registry to push
 # into. The default is to push to the staging
 # registry, not production.
@@ -134,7 +136,7 @@ gen:
	./hack/update-docs.sh

 gen-docker:
-	$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:1.21.5 gen
+	$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:$(GO_VERSION) gen

 verify-gen:
	./hack/verify-conversions.sh
@@ -146,7 +148,7 @@ lint:
 ifndef HAS_GOLANGCI
	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
 endif
-	./_output/bin/golangci-lint run
+	./_output/bin/golangci-lint run -v

 fmt:
 ifndef HAS_GOFUMPT
README.md — 139 changed lines

@@ -2,7 +2,7 @@
 [descheduler logo image]

 <p align="left">
-↖️ Click at the [bullet list icon] at the top left corner of the Readme visualization for the github generated table of contents.
+↗️️ Click at the [bullet list icon] at the top right corner of the Readme visualization for the github generated table of contents.
 </p>

 <p align="center">
@@ -33,17 +33,15 @@ but relies on the default scheduler for that.
 ## ⚠️ Documentation Versions by Release

 If you are using a published release of Descheduler (such as
-`registry.k8s.io/descheduler/descheduler:v0.26.1`), follow the documentation in
+`registry.k8s.io/descheduler/descheduler:v0.31.0`), follow the documentation in
 that version's release branch, as listed below:

 |Descheduler Version|Docs link|
 |---|---|
+|v0.32.x|[`release-1.32`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.32/README.md)|
+|v0.31.x|[`release-1.31`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.31/README.md)|
+|v0.30.x|[`release-1.30`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.30/README.md)|
 |v0.29.x|[`release-1.29`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.29/README.md)|
-|v0.28.x|[`release-1.28`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.28/README.md)|
-|v0.27.x|[`release-1.27`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.27/README.md)|
-|v0.26.x|[`release-1.26`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.26/README.md)|
-|v0.25.x|[`release-1.25`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.25/README.md)|
-|v0.24.x|[`release-1.24`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.24/README.md)|

 The
 [`master`](https://github.com/kubernetes-sigs/descheduler/blob/master/README.md)
@@ -95,17 +93,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku

 Run As A Job
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.26.1' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.32' | kubectl apply -f -
 ```

 Run As A CronJob
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.26.1' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.32' | kubectl apply -f -
 ```

 Run As A Deployment
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.26.1' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.32' | kubectl apply -f -
 ```

 ## User Guide
@@ -114,8 +112,6 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.

 ## Policy, Default Evictor and Strategy plugins

-**⚠️ v1alpha1 configuration is still supported, but deprecated (and soon will be removed). Please consider migrating to v1alpha2 (described below). For previous v1alpha1 documentation go to [docs/deprecated/v1alpha1.md](docs/deprecated/v1alpha1.md) ⚠️**
-
 The Descheduler Policy is configurable and includes default strategy plugins that can be enabled or disabled. It includes a common eviction configuration at the top level, as well as configuration from the Evictor plugin (Default Evictor, if not specified otherwise). Top-level configuration and Evictor plugin configuration are applied to all evictions.

 ### Top Level configuration
@@ -127,21 +123,27 @@ These are top level keys in the Descheduler Policy that you can use to configure
 | `nodeSelector` |`string`| `nil` | limiting the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point |
 | `maxNoOfPodsToEvictPerNode` |`int`| `nil` | maximum number of pods evicted from each node (summed through all strategies) |
 | `maxNoOfPodsToEvictPerNamespace` |`int`| `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
+| `maxNoOfPodsToEvictTotal` |`int`| `nil` | maximum number of pods evicted per rescheduling cycle (summed through all strategies) |
+| `metricsCollector` |`object`| `nil` | configures collection of metrics for actual resource utilization |
+| `metricsCollector.enabled` |`bool`| `false` | enables kubernetes [metrics server](https://kubernetes-sigs.github.io/metrics-server/) collection |

 ### Evictor Plugin configuration (Default Evictor)

 The Default Evictor Plugin is used by default for filtering pods before processing them in a strategy plugin, or for applying a PreEvictionFilter of pods before eviction. You can also create your own Evictor Plugin or use the Default one provided by Descheduler. Other uses for the Evictor plugin can be to sort, filter, validate or group pods by different criteria, and that's why this is handled by a plugin and not configured in the top level config.

-| Name |type| Default Value | Description |
-|------|----|---------------|-------------|
-| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed |
-| `evictLocalStoragePods` |`bool`| `false` | allows eviction of pods with local storage |
-| `ignorePvcPods` |`bool`| `false` | set whether PVC pods should be evicted or ignored |
-| `evictFailedBarePods` |`bool`| `false` | allow eviction of pods without owner references and in failed phase |
-|`labelSelector`|`metav1.LabelSelector`||(see [label filtering](#label-filtering))|
-|`priorityThreshold`|`priorityThreshold`||(see [priority filtering](#priority-filtering))|
-|`nodeFit`|`bool`|`false`|(see [node fit filtering](#node-fit-filtering))|
+| Name                       |type| Default Value | Description                                                                                                                   |
+|----------------------------|----|---------------|-------------------------------------------------------------------------------------------------------------------------------|
+| `nodeSelector`             |`string`| `nil` | limiting the nodes which are processed                                                                                        |
+| `evictLocalStoragePods`    |`bool`| `false` | allows eviction of pods with local storage                                                                                    |
+| `evictSystemCriticalPods`  |`bool`| `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns  |
+| `ignorePvcPods`            |`bool`| `false` | set whether PVC pods should be evicted or ignored                                                                             |
+| `evictFailedBarePods`      |`bool`| `false` | allow eviction of pods without owner references and in failed phase                                                           |
+| `labelSelector`            |`metav1.LabelSelector`|| (see [label filtering](#label-filtering))                                                                                     |
+| `priorityThreshold`        |`priorityThreshold`|| (see [priority filtering](#priority-filtering))                                                                               |
+| `nodeFit`                  |`bool`|`false`| (see [node fit filtering](#node-fit-filtering))                                                                               |
+| `minReplicas`              |`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold                                      |
+| `minPodAge`                |`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold                                                            |
+| `ignorePodsWithoutPDB`     |`bool`|`false`| set whether pods without PodDisruptionBudget should be evicted or ignored                                                     |

 ### Example policy

@@ -157,6 +159,9 @@ kind: "DeschedulerPolicy"
 nodeSelector: "node=node1" # you don't need to set this, if not set all will be processed
 maxNoOfPodsToEvictPerNode: 5000 # you don't need to set this, unlimited if not set
 maxNoOfPodsToEvictPerNamespace: 5000 # you don't need to set this, unlimited if not set
+maxNoOfPodsToEvictTotal: 5000 # you don't need to set this, unlimited if not set
+metricsCollector:
+  enabled: true # you don't need to set this, metrics are not collected if not set
 profiles:
   - name: ProfileName
     pluginConfig:
@@ -166,6 +171,7 @@ profiles:
         evictFailedBarePods: true
         evictLocalStoragePods: true
         nodeFit: true
+        minReplicas: 2
     plugins:
       # DefaultEvictor is enabled for both `filter` and `preEvictionFilter`
       # filter:
@@ -205,7 +211,7 @@ Balance Plugins: These plugins process all pods, or groups of pods, and determin
 | [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint) |Balance|Evicts pods violating TopologySpreadConstraints|
 | [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts) |Deschedule|Evicts pods having too many restarts|
 | [PodLifeTime](#podlifetime) |Deschedule|Evicts pods that have exceeded a specified age limit|
-| [RemoveFailedPods](#removefailedpods) |Deschedule|Evicts pods with certain failed reasons|
+| [RemoveFailedPods](#removefailedpods) |Deschedule|Evicts pods with certain failed reasons and exit codes|


 ### RemoveDuplicates
@@ -275,11 +281,13 @@ If that parameter is set to `true`, the thresholds are considered as percentage
 `thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
 A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).

-**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
+**NOTE:** By default node resource consumption is determined by the requests and limits of pods, not actual usage.
 This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
 design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
 like `kubectl top`) may differ from the calculated consumption, due to these components reporting
-actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
+actual usage metrics. Metrics-based descheduling can be enabled by setting the `metricsUtilization.metricsServer` field.
+In order to have the plugin consume the metrics, the metric collector needs to be configured as well.
+See the `metricsCollector` field at [Top Level configuration](#top-level-configuration) for available options.

 **Parameters:**

@@ -290,6 +298,9 @@ actual usage metrics.
 |`targetThresholds`|map(string:int)|
 |`numberOfNodes`|int|
 |`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|
+|`metricsUtilization`|object|
+|`metricsUtilization.metricsServer`|bool|


 **Example:**

@@ -309,6 +320,8 @@ profiles:
           "cpu" : 50
           "memory": 50
           "pods": 50
+        metricsUtilization:
+          metricsServer: true
     plugins:
       balance:
         enabled:
@@ -500,18 +513,22 @@ key=value matches an excludedTaints entry, the taint will be ignored.
 For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
 excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".

+If a list of includedTaints is provided, a taint will be considered if and only if it matches an included key **or** key=value from the list. Otherwise it will be ignored. Leaving includedTaints unset will include any taint by default.
+
 **Parameters:**

 |Name|Type|
 |---|---|
 |`excludedTaints`|list(string)|
+|`includedTaints`|list(string)|
 |`includePreferNoSchedule`|bool|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
 |`labelSelector`|(see [label filtering](#label-filtering))|

 **Example:**

-````yaml
+Setting `excludedTaints`
+```yaml
 apiVersion: "descheduler/v1alpha2"
 kind: "DeschedulerPolicy"
 profiles:
@@ -526,7 +543,25 @@ profiles:
       deschedule:
         enabled:
           - "RemovePodsViolatingNodeTaints"
-````
+```
+
+Setting `includedTaints`
+```yaml
+apiVersion: "descheduler/v1alpha2"
+kind: "DeschedulerPolicy"
+profiles:
+  - name: ProfileName
+    pluginConfig:
+    - name: "RemovePodsViolatingNodeTaints"
+      args:
+        includedTaints:
+        - decommissioned=end-of-life # include only taints with key "decommissioned" and value "end-of-life"
+        - reserved # include all taints with key "reserved"
+    plugins:
+      deschedule:
+        enabled:
+          - "RemovePodsViolatingNodeTaints"
+```

 ### RemovePodsViolatingTopologySpreadConstraint

@@ -637,20 +672,23 @@ profiles:
 This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.

 You can also specify `states` parameter to **only** evict pods matching the following conditions:
-- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
-- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`, `ImagePullBackOff`
+- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Unknown`
+- [Pod Reason](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions) reasons of: `NodeAffinity`, `NodeLost`, `Shutdown`, `UnexpectedAdmissionError`
+- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`, `ImagePullBackOff`, `CrashLoopBackOff`, `CreateContainerConfigError`, `ErrImagePull`, `CreateContainerError`, `InvalidImageName`

 If a value for `states` or `podStatusPhases` is not specified,
 Pods in any state (even `Running`) are considered for eviction.

 **Parameters:**

-|Name|Type|Notes|
-|---|---|---|
-|`maxPodLifeTimeSeconds`|int||
-|`states`|list(string)|Only supported in v0.25+|
-|`namespaces`|(see [namespace filtering](#namespace-filtering))||
-|`labelSelector`|(see [label filtering](#label-filtering))||
+| Name                           | Type                                              | Notes                    |
+|--------------------------------|---------------------------------------------------|--------------------------|
+| `maxPodLifeTimeSeconds`        | int                                               |                          |
+| `states`                       | list(string)                                      | Only supported in v0.25+ |
+| `includingInitContainers`      | bool                                              | Only supported in v0.31+ |
+| `includingEphemeralContainers` | bool                                              | Only supported in v0.31+ |
+| `namespaces`                   | (see [namespace filtering](#namespace-filtering)) |                          |
+| `labelSelector`                | (see [label filtering](#label-filtering))         |                          |

 **Example:**

@@ -673,10 +711,8 @@ profiles:
 ```

 ### RemoveFailedPods

 This strategy evicts pods that are in failed status phase.
-You can provide an optional parameter to filter by failed `reasons`.
-`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
+You can provide optional parameters to filter by failed pods' and containers' `reasons` and `exitCodes`. `exitCodes` apply only to failed pods' containers in the `terminated` state. `reasons` and `exitCodes` can be expanded to include those of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
 You can specify an optional parameter `minPodLifetimeSeconds` to evict pods that are older than specified seconds.
 Lastly, you can specify the optional parameter `excludeOwnerKinds` and if a pod
 has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
@@ -688,6 +724,7 @@ has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considere
 |`minPodLifetimeSeconds`|uint|
 |`excludeOwnerKinds`|list(string)|
 |`reasons`|list(string)|
+|`exitCodes`|list(int32)|
 |`includingInitContainers`|bool|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
 |`labelSelector`|(see [label filtering](#label-filtering))|
@@ -704,6 +741,8 @@ profiles:
       args:
         reasons:
         - "NodeAffinity"
+        exitCodes:
+        - 1
         includingInitContainers: true
         excludeOwnerKinds:
         - "Job"
@@ -718,7 +757,7 @@ profiles:

 ### Namespace filtering

-The following strategies accept a `namespaces` parameter which allows to specify a list of including, resp. excluding namespaces:
+The following strategies accept a `namespaces` parameter which allows specifying a list of including and excluding namespaces respectively:
 * `PodLifeTime`
 * `RemovePodsHavingTooManyRestarts`
 * `RemovePodsViolatingNodeTaints`
@@ -728,11 +767,10 @@ The following strategies accept a `namespaces` parameter which allows to specify
 * `RemovePodsViolatingTopologySpreadConstraint`
 * `RemoveFailedPods`


-The following strategies accept a `evictableNamespaces` parameter which allows to specify a list of excluding namespaces:
+The following strategies accept an `evictableNamespaces` parameter which allows specifying a list of excluding namespaces:
 * `LowNodeUtilization` and `HighNodeUtilization` (Only filtered right before eviction)

-For example with PodLifeTime:
+In the following example with `PodLifeTime`, `PodLifeTime` gets executed only over `namespace1` and `namespace2`.

 ```yaml
 apiVersion: "descheduler/v1alpha2"
@@ -753,8 +791,7 @@ profiles:
           - "PodLifeTime"
 ```

-In the example `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
-The similar holds for `exclude` field:
+The same holds for the `exclude` field: in the following example the strategy gets executed over all namespaces except `namespace1` and `namespace2`.

 ```yaml
 apiVersion: "descheduler/v1alpha2"
@@ -775,9 +812,7 @@ profiles:
           - "PodLifeTime"
 ```

-The strategy gets executed over all namespaces but `namespace1` and `namespace2`.
-
-It's not allowed to compute `include` with `exclude` field.
+It's not allowed to combine the `include` field with the `exclude` field.

 ### Priority filtering

@@ -835,7 +870,7 @@ does not exist, descheduler won't create it and will throw an error.

 ### Label filtering

-The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#labelselector-v1-meta)
+The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#labelselector-v1-meta)
 to filter pods by their labels:

 * `PodLifeTime`
@@ -880,6 +915,7 @@ profiles:
 - `nodeAffinity` on the pod
 - Resource `requests` made by the pod and the resources available on other nodes
 - Whether any of the other nodes are marked as `unschedulable`
+- Any `podAntiAffinity` between the pod and the pods on the other nodes

 E.g.

@@ -901,7 +937,7 @@ profiles:
           - "PodLifeTime"
 ```

-Note that node fit filtering references the current pod spec, and not that of it's owner.
+Note that node fit filtering references the current pod spec, and not that of its owner.
 Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
 the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
 This is expected behavior as the descheduler is a "best-effort" mechanism.
@@ -915,7 +951,7 @@ When the descheduler decides to evict pods from a node, it employs the following
 * [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
 * Pods (static or mirrored pods or standalone pods) not part of a ReplicationController, ReplicaSet(Deployment), StatefulSet, or Job are
 never evicted because these pods won't be recreated. (Standalone pods in failed status phase can be evicted by setting `evictFailedBarePods: true`)
-* Pods associated with DaemonSets are never evicted.
+* Pods associated with DaemonSets are never evicted (unless `evictDaemonSetPods: true` is set).
 * Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
 * Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
 * In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
@@ -973,6 +1009,11 @@ packages that it is compiled with.

 | Descheduler | Supported Kubernetes Version |
 |-------------|------------------------------|
+| v0.32       | v1.32                        |
+| v0.31       | v1.31                        |
+| v0.30       | v1.30                        |
 | v0.29       | v1.29                        |
 | v0.28       | v1.28                        |
 | v0.27       | v1.27                        |
 | v0.26       | v1.26                        |
 | v0.25       | v1.25                        |
[file header lost in the mirror; the content is charts/descheduler/Chart.yaml]

@@ -1,7 +1,7 @@
 apiVersion: v1
 name: descheduler
-version: 0.29.0
-appVersion: 0.29.0
+version: 0.32.2
+appVersion: 0.32.2
 description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
 keywords:
   - kubernetes
[file header lost in the mirror; the content is charts/descheduler/README.md]

@@ -52,6 +52,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | `imagePullSecrets` | Docker repository secrets | `[]` |
 | `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
 | `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
+| `namespaceOverride` | Override the deployment namespace; defaults to .Release.Namespace | `""` |
 | `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
 | `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
 | `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
@@ -63,7 +64,6 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | `replicas` | The replica count for Deployment | `1` |
 | `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
 | `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
-| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
 | `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
 | `rbac.create` | If `true`, create & use RBAC resources | `true` |
 | `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
@@ -84,6 +84,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
 | `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
 | `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
+| `topologySpreadConstraints` | Topology Spread Constraints to spread the descheduler cronjob/deployment across the cluster | `[]` |
 | `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
 | `suspend` | Set spec.suspend in descheduler cronjob | `false` |
 | `commonLabels` | Labels to apply to all resources | `{}` |
[file header lost in the mirror; the content is charts/descheduler/templates/NOTES.txt]

@@ -1,7 +1,7 @@
 Descheduler installed as a {{ .Values.kind }}.

 {{- if eq .Values.kind "Deployment" }}
-{{- if eq .Values.replicas 1.0}}
+{{- if eq (.Values.replicas | int) 1 }}
 WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
 {{- end}}
 {{- if .Values.leaderElection }}
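The `| int` cast matters because Helm hands templates different numeric types for the same value: numbers parsed from values.yaml arrive as float64, while `--set replicas=1` on the command line produces an int64, so the strict `eq .Values.replicas 1.0` comparison silently failed for the latter. A minimal standalone sketch of the normalized check (illustrative only; `replicas` is the chart value referenced above):

```
{{- /* Both `replicas: 1` in values.yaml and `--set replicas=1` now satisfy
       this guard, since the value is coerced to an int before comparing. */ -}}
{{- if eq (.Values.replicas | int) 1 }}
WARNING: single replica without leader election
{{- end }}
```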
[file header lost in the mirror; the content is charts/descheduler/templates/_helpers.tpl]

@@ -24,6 +24,14 @@ If release name contains chart name it will be used as a full name.
 {{- end -}}
 {{- end -}}

+{{/*
+Expand the namespace of the release.
+Allows overriding it for multi-namespace deployments in combined charts.
+*/}}
+{{- define "descheduler.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
 {{/*
 Create chart name and version as used by the chart label.
 */}}
@@ -87,8 +95,10 @@ Leader Election
 {{- if .Values.leaderElection.resourceName }}
 - --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
 {{- end }}
-{{- if .Values.leaderElection.resourceNamescape }}
-- --leader-elect-resource-namespace={{ .Values.leaderElection.resourceNamescape }}
+{{/* resource namespace value starts with a typo so keeping resourceNamescape for backwards compatibility */}}
+{{- $resourceNamespace := default .Values.leaderElection.resourceNamespace .Values.leaderElection.resourceNamescape -}}
+{{- if $resourceNamespace -}}
+- --leader-elect-resource-namespace={{ $resourceNamespace }}
 {{- end -}}
 {{- end }}
 {{- end }}
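Under the shim above, either spelling of the leader-election namespace key yields the same flag; note that, given `default`'s argument order, the legacy misspelled key wins if both are set. A minimal values.yaml sketch (the namespace value is illustrative):

```yaml
leaderElection:
  enabled: true
  # preferred, correctly spelled key
  resourceNamespace: kube-system
  # legacy misspelled key, still honored for backwards compatibility
  # resourceNamescape: kube-system
# either produces: --leader-elect-resource-namespace=kube-system
```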
[file header lost in the mirror; the content is likely charts/descheduler/templates/clusterrole.yaml]

@@ -24,6 +24,9 @@ rules:
   - apiGroups: ["scheduling.k8s.io"]
     resources: ["priorityclasses"]
     verbs: ["get", "watch", "list"]
+  - apiGroups: ["policy"]
+    resources: ["poddisruptionbudgets"]
+    verbs: ["get", "watch", "list"]
 {{- if .Values.leaderElection.enabled }}
   - apiGroups: ["coordination.k8s.io"]
     resources: ["leases"]
@@ -33,4 +36,9 @@ rules:
     resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
     verbs: ["get", "patch", "delete"]
 {{- end }}
+{{- if and .Values.deschedulerPolicy .Values.deschedulerPolicy.metricsCollector .Values.deschedulerPolicy.metricsCollector.enabled }}
+  - apiGroups: ["metrics.k8s.io"]
+    resources: ["pods", "nodes"]
+    verbs: ["get", "list"]
+{{- end }}
 {{- end -}}
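The new `metrics.k8s.io` rule is emitted only when the rendered policy actually enables the metrics collector, so existing installs gain no extra permissions. A minimal values.yaml fragment that would activate it (a sketch; only `metricsCollector.enabled` is significant for the conditional above):

```yaml
deschedulerPolicy:
  metricsCollector:
    enabled: true   # gates both the policy field and the metrics.k8s.io RBAC rule above
```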
[file header lost in the mirror; the content is likely charts/descheduler/templates/clusterrolebinding.yaml]

@@ -12,5 +12,5 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: {{ template "descheduler.serviceAccountName" . }}
-    namespace: {{ .Release.Namespace }}
+    namespace: {{ include "descheduler.namespace" . }}
 {{- end -}}
[file header lost in the mirror; the content is likely charts/descheduler/templates/configmap.yaml]

@@ -1,8 +1,9 @@
+{{- if .Values.deschedulerPolicy }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: {{ template "descheduler.fullname" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "descheduler.namespace" . }}
   labels:
     {{- include "descheduler.labels" . | nindent 4 }}
 data:
@@ -10,3 +11,4 @@ data:
     apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
     kind: "DeschedulerPolicy"
     {{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
+{{- end }}
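Wrapping the ConfigMap in `{{- if .Values.deschedulerPolicy }}` means the chart skips creating it when the policy is empty, which is what lets users point the descheduler at a pre-existing ConfigMap instead. A sketch of that mode (per the values.yaml note later in this diff, the existing ConfigMap's name must match the `descheduler.fullname` template output):

```yaml
# values.yaml — render no chart-managed policy ConfigMap;
# an empty map is falsy in Go templates, so the if-guard skips the manifest
deschedulerPolicy: {}
```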
[file header lost in the mirror; the content is likely charts/descheduler/templates/cronjob.yaml]

@@ -3,7 +3,7 @@ apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
 kind: CronJob
 metadata:
   name: {{ template "descheduler.fullname" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "descheduler.namespace" . }}
   labels:
     {{- include "descheduler.labels" . | nindent 4 }}
 spec:
@@ -15,10 +15,10 @@ spec:
   {{- if .Values.startingDeadlineSeconds }}
   startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
   {{- end }}
-  {{- if .Values.successfulJobsHistoryLimit }}
+  {{- if ne .Values.successfulJobsHistoryLimit nil }}
   successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
   {{- end }}
-  {{- if .Values.failedJobsHistoryLimit }}
+  {{- if ne .Values.failedJobsHistoryLimit nil }}
   failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
   {{- end }}
   {{- if .Values.timeZone }}
@@ -51,6 +51,10 @@ spec:
           affinity:
             {{- toYaml . | nindent 12 }}
          {{- end }}
+          {{- with .Values.topologySpreadConstraints }}
+          topologySpreadConstraints:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
           {{- if .Values.dnsConfig }}
           dnsConfig:
             {{- .Values.dnsConfig | toYaml | nindent 12 }}
@@ -77,14 +81,22 @@ spec:
               args:
                 - --policy-config-file=/policy-dir/policy.yaml
                 {{- range $key, $value := .Values.cmdOptions }}
-                - {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
+                {{- if ne $value nil }}
+                - {{ printf "--%s=%s" $key (toString $value) }}
+                {{- else }}
+                - {{ printf "--%s" $key }}
+                {{- end }}
                 {{- end }}
               livenessProbe:
                 {{- toYaml .Values.livenessProbe | nindent 16 }}
               ports:
                 {{- toYaml .Values.ports | nindent 16 }}
               resources:
                 {{- toYaml .Values.resources | nindent 16 }}
+              {{- if .Values.securityContext }}
+              securityContext:
+                {{- toYaml .Values.securityContext | nindent 16 }}
+              {{- end }}
               volumeMounts:
                 - mountPath: /policy-dir
                   name: policy-volume
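The reworked `cmdOptions` loop distinguishes valued flags from bare ones: a non-nil value now always renders `--key=value`, while an explicit null renders just `--key`. The old `{{ if $value }}` form dropped the `=value` part for falsy-but-meaningful values such as `false` or `0`, turning e.g. `enable-http2: false` into a bare `--enable-http2` that a boolean flag parser reads as true. A sketch of inputs and the args they now produce (flag names are illustrative):

```yaml
cmdOptions:
  v: 3                # renders: --v=3
  dry-run:            # null value renders a bare flag: --dry-run
  enable-http2: false # now renders --enable-http2=false
                      # (the old template emitted just --enable-http2)
```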
[file header lost in the mirror; the content is likely charts/descheduler/templates/deployment.yaml]

@@ -3,11 +3,11 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: {{ template "descheduler.fullname" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "descheduler.namespace" . }}
   labels:
     {{- include "descheduler.labels" . | nindent 4 }}
 spec:
-  {{- if gt .Values.replicas 1.0}}
+  {{- if gt (.Values.replicas | int) 1 }}
   {{- if not .Values.leaderElection.enabled }}
   {{- fail "You must set leaderElection to use more than 1 replica"}}
   {{- end}}
@@ -53,18 +53,23 @@ spec:
             - --policy-config-file=/policy-dir/policy.yaml
             - --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
            {{- range $key, $value := .Values.cmdOptions }}
-            - {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
+            {{- if ne $value nil }}
+            - {{ printf "--%s=%s" $key (toString $value) }}
+            {{- else }}
+            - {{ printf "--%s" $key }}
+            {{- end }}
            {{- end }}
            {{- include "descheduler.leaderElection" . | nindent 12 }}
           ports:
-            - containerPort: 10258
-              protocol: TCP
+            {{- toYaml .Values.ports | nindent 12 }}
           livenessProbe:
             {{- toYaml .Values.livenessProbe | nindent 12 }}
           resources:
             {{- toYaml .Values.resources | nindent 12 }}
+          {{- if .Values.securityContext }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          {{- end }}
           volumeMounts:
             - mountPath: /policy-dir
               name: policy-volume
@@ -84,6 +89,10 @@ spec:
       affinity:
         {{- toYaml . | nindent 8 }}
      {{- end }}
+      {{- with .Values.topologySpreadConstraints }}
+      topologySpreadConstraints:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.tolerations }}
       tolerations:
         {{- toYaml . | nindent 8 }}
[file header lost in the mirror; the content is likely charts/descheduler/templates/service.yaml]

@@ -6,7 +6,7 @@ metadata:
   labels:
     {{- include "descheduler.labels" . | nindent 4 }}
   name: {{ template "descheduler.fullname" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "descheduler.namespace" . }}
 spec:
   clusterIP: None
   {{- if .Values.service.ipFamilyPolicy }}
[file header lost in the mirror; the content is likely charts/descheduler/templates/serviceaccount.yaml]

@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: {{ template "descheduler.serviceAccountName" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "descheduler.namespace" . }}
   labels:
     {{- include "descheduler.labels" . | nindent 4 }}
   {{- if .Values.serviceAccount.annotations }}
[file header lost in the mirror; the content is likely charts/descheduler/templates/servicemonitor.yaml]

@@ -14,7 +14,7 @@ spec:
   jobLabel: jobLabel
   namespaceSelector:
     matchNames:
-      - {{ .Release.Namespace }}
+      - {{ include "descheduler.namespace" . }}
   selector:
     matchLabels:
       {{- include "descheduler.selectorLabels" . | nindent 6 }}
charts/descheduler/tests/.gitignore (new file, vendored) — 1 line

@@ -0,0 +1 @@
+__snapshot__
charts/descheduler/tests/cronjob_test.yaml (new file) — 17 lines

@@ -0,0 +1,17 @@
+suite: Test Descheduler CronJob
+
+templates:
+  - "*.yaml"
+
+release:
+  name: descheduler
+
+set:
+  kind: CronJob
+
+tests:
+  - it: creates CronJob when kind is set
+    template: templates/cronjob.yaml
+    asserts:
+      - isKind:
+          of: CronJob
charts/descheduler/tests/deployment_test.yaml (new file) — 49 lines

@@ -0,0 +1,49 @@
+suite: Test Descheduler Deployment
+
+templates:
+  - "*.yaml"
+
+release:
+  name: descheduler
+
+set:
+  kind: Deployment
+
+tests:
+  - it: creates Deployment when kind is set
+    template: templates/deployment.yaml
+    asserts:
+      - isKind:
+          of: Deployment
+
+  - it: enables leader-election
+    set:
+      leaderElection:
+        enabled: true
+    template: templates/deployment.yaml
+    asserts:
+      - contains:
+          path: spec.template.spec.containers[0].args
+          content: --leader-elect=true
+
+  - it: support leader-election resourceNamespace
+    set:
+      leaderElection:
+        enabled: true
+        resourceNamespace: test
+    template: templates/deployment.yaml
+    asserts:
+      - contains:
+          path: spec.template.spec.containers[0].args
+          content: --leader-elect-resource-namespace=test
+
+  - it: support legacy leader-election resourceNamescape
+    set:
+      leaderElection:
+        enabled: true
+        resourceNamescape: typo
+    template: templates/deployment.yaml
+    asserts:
+      - contains:
+          path: spec.template.spec.containers[0].args
+          content: --leader-elect-resource-namespace=typo
[file header lost in the mirror; the content is charts/descheduler/values.yaml]

@@ -18,9 +18,13 @@ resources:
   requests:
     cpu: 500m
     memory: 256Mi
-  # limits:
-  #   cpu: 100m
-  #   memory: 128Mi
+  limits:
+    cpu: 500m
+    memory: 256Mi
+
+ports:
+  - containerPort: 10258
+    protocol: TCP

 securityContext:
   allowPrivilegeEscalation: false
@@ -39,6 +43,9 @@ podSecurityContext: {}
 nameOverride: ""
 fullnameOverride: ""

+# -- Override the deployment namespace; defaults to .Release.Namespace
+namespaceOverride: ""
+
 # labels that'll be applied to all resources
 commonLabels: {}

@@ -70,7 +77,7 @@ leaderElection: {}
 #  retryPeriod: 2s
 #  resourceLock: "leases"
 #  resourceName: "descheduler"
-#  resourceNamescape: "kube-system"
+#  resourceNamespace: "kube-system"

 command:
 - "/bin/descheduler"
@@ -79,14 +86,21 @@ cmdOptions:
   v: 3

 # Recommended to use the latest Policy API version supported by the Descheduler app version
-deschedulerPolicyAPIVersion: "descheduler/v1alpha1"
+deschedulerPolicyAPIVersion: "descheduler/v1alpha2"

+# deschedulerPolicy contains the policies the descheduler will execute.
+# To use policies stored in an existing configMap use:
+# NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
+# deschedulerPolicy: {}
 deschedulerPolicy:
   # nodeSelector: "key1=value1,key2=value2"
   # maxNoOfPodsToEvictPerNode: 10
   # maxNoOfPodsToEvictPerNamespace: 10
+  # metricsCollector:
+  #   enabled: true
   # ignorePvcPods: true
   # evictLocalStoragePods: true
+  # evictDaemonSetPods: true
   # tracing:
   #   collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
   #   transportCert: ""
@@ -94,40 +108,47 @@ deschedulerPolicy:
   #   serviceNamespace: ""
   #   sampleRate: 1.0
   #   fallbackToNoOpProviderOnError: true
-  strategies:
-    RemoveDuplicates:
-      enabled: true
-    RemovePodsHavingTooManyRestarts:
-      enabled: true
-      params:
-        podsHavingTooManyRestarts:
-          podRestartThreshold: 100
-          includingInitContainers: true
-    RemovePodsViolatingNodeTaints:
-      enabled: true
-    RemovePodsViolatingNodeAffinity:
-      enabled: true
-      params:
-        nodeAffinityType:
-        - requiredDuringSchedulingIgnoredDuringExecution
-    RemovePodsViolatingInterPodAntiAffinity:
-      enabled: true
-    RemovePodsViolatingTopologySpreadConstraint:
-      enabled: true
-      params:
-        includeSoftConstraints: false
-    LowNodeUtilization:
-      enabled: true
-      params:
-        nodeResourceUtilizationThresholds:
-          thresholds:
-            cpu: 20
-            memory: 20
-            pods: 20
-          targetThresholds:
-            cpu: 50
-            memory: 50
-            pods: 50
+  profiles:
+    - name: default
+      pluginConfig:
+        - name: DefaultEvictor
+          args:
+            ignorePvcPods: true
+            evictLocalStoragePods: true
+        - name: RemoveDuplicates
+        - name: RemovePodsHavingTooManyRestarts
+          args:
+            podRestartThreshold: 100
+            includingInitContainers: true
+        - name: RemovePodsViolatingNodeAffinity
+          args:
+            nodeAffinityType:
+              - requiredDuringSchedulingIgnoredDuringExecution
+        - name: RemovePodsViolatingNodeTaints
+        - name: RemovePodsViolatingInterPodAntiAffinity
+        - name: RemovePodsViolatingTopologySpreadConstraint
+        - name: LowNodeUtilization
+          args:
+            thresholds:
+              cpu: 20
+              memory: 20
+              pods: 20
+            targetThresholds:
+              cpu: 50
+              memory: 50
+              pods: 50
+      plugins:
+        balance:
+          enabled:
+            - RemoveDuplicates
+            - RemovePodsViolatingTopologySpreadConstraint
+            - LowNodeUtilization
+        deschedule:
+          enabled:
+            - RemovePodsHavingTooManyRestarts
+            - RemovePodsViolatingNodeTaints
+            - RemovePodsViolatingNodeAffinity
+            - RemovePodsViolatingInterPodAntiAffinity

 priorityClassName: system-cluster-critical

@@ -153,6 +174,13 @@ affinity: {}
 #        values:
 #        - descheduler
 #    topologyKey: "kubernetes.io/hostname"
+topologySpreadConstraints: []
+# - maxSkew: 1
+#   topologyKey: kubernetes.io/hostname
+#   whenUnsatisfiable: DoNotSchedule
+#   labelSelector:
+#     matchLabels:
+#       app.kubernetes.io/name: descheduler
 tolerations: []
 # - key: 'management'
 #   operator: 'Equal'
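With the chart's default policy now expressed as v1alpha2 `profiles`, per-install tuning overrides the profile list rather than the removed `strategies` map. A minimal override that keeps only `PodLifeTime` is sketched below (plugin and field names as documented in the README diff above; the 7-day limit is illustrative):

```yaml
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
deschedulerPolicy:
  profiles:
    - name: default
      pluginConfig:
        - name: DefaultEvictor
          args:
            nodeFit: true
        - name: PodLifeTime
          args:
            maxPodLifeTimeSeconds: 604800   # evict pods older than 7 days
      plugins:
        deschedule:
          enabled:
            - PodLifeTime
```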
[file header lost in the mirror; the content is likely cmd/descheduler/app/options/options.go]

@@ -18,17 +18,28 @@ limitations under the License.
 package options

 import (
+	"strings"
 	"time"

 	"github.com/spf13/pflag"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	apiserver "k8s.io/apiserver/pkg/server"
 	apiserveroptions "k8s.io/apiserver/pkg/server/options"
 	clientset "k8s.io/client-go/kubernetes"

+	restclient "k8s.io/client-go/rest"
+	cliflag "k8s.io/component-base/cli/flag"
 	componentbaseconfig "k8s.io/component-base/config"
 	componentbaseoptions "k8s.io/component-base/config/options"
+	"k8s.io/component-base/featuregate"
+	"k8s.io/klog/v2"
+	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"

 	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
 	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
 	deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
+	"sigs.k8s.io/descheduler/pkg/features"
 	"sigs.k8s.io/descheduler/pkg/tracing"
 )

@@ -40,11 +51,17 @@ const (
 type DeschedulerServer struct {
 	componentconfig.DeschedulerConfiguration

-	Client         clientset.Interface
-	EventClient    clientset.Interface
-	SecureServing  *apiserveroptions.SecureServingOptionsWithLoopback
-	DisableMetrics bool
-	EnableHTTP2    bool
+	Client            clientset.Interface
+	EventClient       clientset.Interface
+	MetricsClient     metricsclient.Interface
+	SecureServing     *apiserveroptions.SecureServingOptionsWithLoopback
+	SecureServingInfo *apiserver.SecureServingInfo
+	DisableMetrics    bool
+	EnableHTTP2       bool
+	// FeatureGates enabled by the user
+	FeatureGates map[string]bool
+	// DefaultFeatureGates for internal accessing so unit tests can enable/disable specific features
+	DefaultFeatureGates featuregate.FeatureGate
 }

 // NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -102,8 +119,31 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
 	fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
 	fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
 	fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
+	fs.Var(cliflag.NewMapStringBool(&rs.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
+		"Options are:\n"+strings.Join(features.DefaultMutableFeatureGate.KnownFeatures(), "\n"))

 	componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)

 	rs.SecureServing.AddFlags(fs)
 }

+func (rs *DeschedulerServer) Apply() error {
+	err := features.DefaultMutableFeatureGate.SetFromMap(rs.FeatureGates)
+	if err != nil {
+		return err
+	}
+	rs.DefaultFeatureGates = features.DefaultMutableFeatureGate
+
+	// loopbackClientConfig is a config for a privileged loopback connection
+	var loopbackClientConfig *restclient.Config
+	var secureServing *apiserver.SecureServingInfo
+	if err := rs.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
+		klog.ErrorS(err, "failed to apply secure server configuration")
+		return err
+	}
+
+	secureServing.DisableHTTP2 = !rs.EnableHTTP2
+	rs.SecureServingInfo = secureServing
+
+	return nil
+}
@@ -23,19 +23,16 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"k8s.io/apiserver/pkg/server/healthz"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
apiserver "k8s.io/apiserver/pkg/server"
|
||||
"k8s.io/apiserver/pkg/server/healthz"
|
||||
"k8s.io/apiserver/pkg/server/mux"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/component-base/logs"
|
||||
logsapi "k8s.io/component-base/logs/api/v1"
|
||||
@@ -67,40 +64,16 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			// loopbackClientConfig is a config for a privileged loopback connection
			var loopbackClientConfig *restclient.Config
			var secureServing *apiserver.SecureServingInfo
			if err := s.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
				klog.ErrorS(err, "failed to apply secure server configuration")
			if err = s.Apply(); err != nil {
				klog.ErrorS(err, "failed to apply")
				return err
			}

			secureServing.DisableHTTP2 = !s.EnableHTTP2

			ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)

			pathRecorderMux := mux.NewPathRecorderMux("descheduler")
			if !s.DisableMetrics {
				pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
			}

			healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))

			stoppedCh, _, err := secureServing.Serve(pathRecorderMux, 0, ctx.Done())
			if err != nil {
				klog.Fatalf("failed to start secure server: %v", err)
			if err = Run(cmd.Context(), s); err != nil {
				klog.ErrorS(err, "failed to run descheduler server")
				return err
			}

			if err = Run(ctx, s); err != nil {
				klog.ErrorS(err, "descheduler server")
				return err
			}

			done()
			// wait for metrics server to close
			<-stoppedCh

			return nil
		},
	}

@@ -114,14 +87,39 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
	return cmd
}

func Run(ctx context.Context, rs *options.DeschedulerServer) error {
	err := tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
	ctx, done := signal.NotifyContext(rootCtx, syscall.SIGINT, syscall.SIGTERM)

	pathRecorderMux := mux.NewPathRecorderMux("descheduler")
	if !rs.DisableMetrics {
		pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
	}

	healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))

	stoppedCh, _, err := rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
	if err != nil {
		klog.Fatalf("failed to start secure server: %v", err)
		return err
	}

	err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
	if err != nil {
		klog.ErrorS(err, "failed to create tracer provider")
	}
	defer tracing.Shutdown(ctx)

	// increase the fake watch channel so the dry-run mode can be run
	// over a cluster with thousands of pods
	watch.DefaultChanSize = 100000
	return descheduler.Run(ctx, rs)
	err = descheduler.Run(ctx, rs)
	if err != nil {
		return err
	}

	done()
	// wait for metrics server to close
	<-stoppedCh

	return nil
}

@@ -19,22 +19,29 @@ descheduler [flags]
--client-connection-kubeconfig string   File path to kube configuration for interacting with kubernetes apiserver.
--client-connection-qps float32         QPS to use for interacting with kubernetes apiserver.
--descheduling-interval duration        Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--disable-http2-serving                 If true, HTTP2 serving will be disabled [default=false]
--disable-metrics                       Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
--dry-run                               Execute descheduler in dry run mode.
--enable-http2                          If http/2 should be enabled for the metrics and health check
--feature-gates mapStringBool           A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
                                          AllAlpha=true|false (ALPHA - default=false)
                                          AllBeta=true|false (BETA - default=false)
                                          EvictionsInBackground=true|false (ALPHA - default=false)
-h, --help                              help for descheduler
--http2-max-streams-per-connection int  The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kubeconfig string                     File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
--leader-elect                          Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
--leader-elect-lease-duration duration  The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 2m17s)
--leader-elect-renew-deadline duration  The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. (default 1m47s)
--leader-elect-resource-lock string     The type of resource object that is used for locking during leader election. Supported options are 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
--leader-elect-resource-lock string     The type of resource object that is used for locking during leader election. Supported options are 'leases'. (default "leases")
--leader-elect-resource-name string     The name of resource object that is used for locking during leader election. (default "descheduler")
--leader-elect-resource-namespace string  The namespace of resource object that is used for locking during leader election. (default "kube-system")
--leader-elect-retry-period duration    The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
--log-flush-frequency duration          Maximum number of seconds between log flushes (default 5s)
--log-json-info-buffer-size quantity    [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-json-split-stream                 [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--log-text-info-buffer-size quantity    [Alpha] In text format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-text-split-stream                 [Alpha] In text format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--logging-format string                 Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". (default "text")
--otel-collector-endpoint string        Set this flag to the OpenTelemetry Collector Service Address
--otel-fallback-no-op-on-error          Fallback to NoOp Tracer in case of error
@@ -48,8 +55,8 @@ descheduler [flags]
--secure-port int                       The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
--tls-cert-file string                  File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites strings             Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
                                          Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
                                          Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
                                          Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256.
                                          Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_RC4_128_SHA.
--tls-min-version string                Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string           File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey         A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
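
For example, a run that enables the alpha `EvictionsInBackground` gate and deschedules in a continuous five-minute loop could combine the flags above as follows (the values are illustrative):

```
descheduler --feature-gates=EvictionsInBackground=true --descheduling-interval=5m
```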
@@ -3,7 +3,7 @@
## Required Tools

- [Git](https://git-scm.com/downloads)
- [Go 1.16+](https://golang.org/dl/)
- [Go 1.23+](https://golang.org/dl/)
- [Docker](https://docs.docker.com/install/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
- [kind v0.10.0+](https://kind.sigs.k8s.io/)

@@ -1,784 +0,0 @@
[](https://goreportcard.com/report/sigs.k8s.io/descheduler)

<p align="center">
<img src="assets/logo/descheduler-stacked-color.png" width="40%" align="center" alt="descheduler">
</p>

# Descheduler for Kubernetes

Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
pod can or cannot be scheduled, are guided by its configurable policy, which comprises a set of
rules, called predicates and priorities. The scheduler's decisions are influenced by its view of
the Kubernetes cluster at the point in time when a new pod appears for scheduling.
As Kubernetes clusters are very dynamic and their state changes over time, there may be a desire
to move already running pods to some other nodes for various reasons:

* Some nodes are under or over utilized.
* The original scheduling decision does not hold true any more, as taints or labels are added to
or removed from nodes, or pod/node affinity requirements are no longer satisfied.
* Some nodes failed and their pods moved to other nodes.
* New nodes are added to clusters.

Consequently, there might be several pods scheduled on less desired nodes in a cluster.
The descheduler, based on its policy, finds pods that can be moved and evicts them. Please
note that, in the current implementation, the descheduler does not schedule replacements of evicted pods
but relies on the default scheduler for that.

Table of Contents
=================
<!-- toc -->
- [Quick Start](#quick-start)
  - [Run As A Job](#run-as-a-job)
  - [Run As A CronJob](#run-as-a-cronjob)
  - [Run As A Deployment](#run-as-a-deployment)
  - [Install Using Helm](#install-using-helm)
  - [Install Using Kustomize](#install-using-kustomize)
- [User Guide](#user-guide)
- [Policy and Strategies](#policy-and-strategies)
  - [RemoveDuplicates](#removeduplicates)
  - [LowNodeUtilization](#lownodeutilization)
  - [HighNodeUtilization](#highnodeutilization)
  - [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
  - [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
  - [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
  - [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
  - [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
  - [PodLifeTime](#podlifetime)
  - [RemoveFailedPods](#removefailedpods)
- [Filter Pods](#filter-pods)
  - [Namespace filtering](#namespace-filtering)
  - [Priority filtering](#priority-filtering)
  - [Label filtering](#label-filtering)
  - [Node Fit filtering](#node-fit-filtering)
- [Pod Evictions](#pod-evictions)
  - [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
- [High Availability](#high-availability)
  - [Configure HA Mode](#configure-ha-mode)
- [Metrics](#metrics)
- [Compatibility Matrix](#compatibility-matrix)
- [Getting Involved and Contributing](#getting-involved-and-contributing)
  - [Communicating With Contributors](#communicating-with-contributors)
- [Roadmap](#roadmap)
- [Code of conduct](#code-of-conduct)
<!-- /toc -->

## Quick Start

The descheduler can be run as a `Job`, `CronJob`, or `Deployment` inside of a k8s cluster. This has the
advantage that it can be run multiple times without user intervention.
The descheduler pod runs as a critical pod in the `kube-system` namespace to avoid
being evicted by itself or by the kubelet.

### Run As A Job

```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/job/job.yaml
```

### Run As A CronJob

```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/cronjob/cronjob.yaml
```

### Run As A Deployment

```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/deployment/deployment.yaml
```

### Install Using Helm

Starting with release v0.18.0 there is an official helm chart that can be used to install the
descheduler. See the [helm chart README](https://github.com/kubernetes-sigs/descheduler/blob/master/charts/descheduler/README.md) for detailed instructions.

The descheduler helm chart is also listed on the [artifact hub](https://artifacthub.io/packages/helm/descheduler/descheduler).

### Install Using Kustomize

You can use kustomize to install descheduler.
See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/cmd/build/) for detailed instructions.

Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.29.0' | kubectl apply -f -
```

Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.29.0' | kubectl apply -f -
```

Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.29.0' | kubectl apply -f -
```

## User Guide

See the [user guide](docs/user-guide.md) in the `/docs` directory.

## Policy and Strategies

Descheduler's policy is configurable and includes strategies that can be enabled or disabled. By default, all strategies are enabled.

The policy includes a common configuration that applies to all the strategies:

| Name | Default Value | Description |
|------|---------------|-------------|
| `nodeSelector` | `nil` | limits the nodes that are processed |
| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` | `false` | sets whether PVC pods should be evicted or ignored |
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
| `maxNoOfPodsToEvictPerNamespace` | `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
| `evictFailedBarePods` | `false` | allows eviction of pods without owner references that are in the Failed phase |

As part of the policy, the parameters associated with each strategy can be configured.
See each strategy for details on available parameters.

**Policy:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictFailedBarePods: false
evictLocalStoragePods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false
strategies:
  ...
```

The following diagram provides a visualization of most of the strategies to help
categorize how strategies fit together.

### RemoveDuplicates

This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
ReplicationController (RC), StatefulSet, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This situation can arise
if some nodes went down for any reason and their pods moved to other nodes, leading to
more than one pod associated with the same RS or RC, for example, running on the same node. Once the failed nodes
are ready again, this strategy can be enabled to evict those duplicate pods.

It provides one optional parameter, `excludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction. Note that
pods created by Deployments are considered for eviction by this strategy, since Deployments manage their
pods through ReplicaSets; to exclude them, add `ReplicaSet` to `excludeOwnerKinds`.

**Parameters:**

|Name|Type|
|---|---|
|`excludeOwnerKinds`|list(string)|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: true
    params:
      removeDuplicates:
        excludeOwnerKinds:
        - "ReplicaSet"
```

### LowNodeUtilization

This strategy finds nodes that are under utilized and evicts pods, if possible, from other nodes
in the hope that recreation of the evicted pods will be scheduled on these underutilized nodes. The
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.

The under utilization of nodes is determined by a configurable threshold, `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources, in terms of percentage.
The percentage is calculated as the resources currently requested on the node vs. the node's
[total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity);
for pods, it is the number of pods on the node as a fraction of the pod capacity set for that node.

If a node's usage is below the threshold for all resources (cpu, memory, number of pods, and extended resources), the node is considered underutilized.
Currently, pods' resource requests are used to compute node resource utilization.

There is another configurable threshold, `targetThresholds`, that is used to compute the potential nodes
from which pods could be evicted. If a node's usage is above `targetThresholds` for any resource (cpu, memory, number of pods, or extended resources),
the node is considered overutilized. Any node between `thresholds` and `targetThresholds` is
considered appropriately utilized and is not considered for eviction. The `targetThresholds` threshold
can also be configured for cpu, memory, and number of pods in terms of percentage.

These thresholds, `thresholds` and `targetThresholds`, can be tuned as per your cluster requirements. Note that this
strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
(those with usage below `thresholds`); it aborts if the number of `underutilized nodes` or `overutilized nodes` is zero.

Additionally, the strategy accepts a `useDeviationThresholds` parameter.
If that parameter is set to `true`, the thresholds are interpreted as percentage deviations from mean resource usage:
`thresholds` is deducted from the mean among all nodes and `targetThresholds` is added to the mean.
Resource consumption above (resp. below) this window is considered overutilization (resp. underutilization).
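
For illustration, suppose the mean cpu request across all nodes is 50%. With `useDeviationThresholds: true` and a cpu value of 10 in both `thresholds` and `targetThresholds`, the window becomes \[50 - 10, 50 + 10\] = \[40%, 60%\]: nodes below 40% count as underutilized and nodes above 60% as overutilized. A minimal policy sketch (the numbers are illustrative):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        useDeviationThresholds: true
        thresholds:          # mean - 10 marks the underutilization bound
          "cpu": 10
          "memory": 10
          "pods": 10
        targetThresholds:    # mean + 10 marks the overutilization bound
          "cpu": 10
          "memory": 10
          "pods": 10
```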

**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.

**Parameters:**

|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`targetThresholds`|map(string:int)|
|`numberOfNodes`|int|
|`useDeviationThresholds`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|`Namespaces`|(see [namespace filtering](#namespace-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu" : 50
          "memory": 50
          "pods": 50
```

Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`.
If any of these resource types is not specified, all its thresholds default to 100% to avoid nodes going from underutilized to overutilized.
* Extended resources are supported. For example, the resource type `nvidia.com/gpu` can be specified for GPU node utilization. Extended resources are optional
and will not be used to compute a node's usage unless they are specified in both `thresholds` and `targetThresholds` explicitly.
* Neither `thresholds` nor `targetThresholds` can be nil, and they must configure exactly the same types of resources.
* The valid range of a resource's percentage value is \[0, 100\].
* The percentage value in `thresholds` cannot be greater than the one in `targetThresholds` for the same resource.

There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
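
For instance, a sketch that only activates the strategy once more than ten nodes are underutilized (the values are illustrative):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        numberOfNodes: 10    # do nothing until more than 10 nodes are underutilized
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50
```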

### HighNodeUtilization

This strategy finds nodes that are under utilized and evicts pods from those nodes in the hope that the pods will be
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
trigger down-scaling of under utilized nodes.
This strategy **must** be used with the scheduler scoring strategy `MostAllocated`. The parameters of this strategy are
configured under `nodeResourceUtilizationThresholds`.

The under utilization of nodes is determined by a configurable threshold, `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources, in terms of percentage. The percentage is
calculated as the resources currently requested on the node vs. the node's [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node.

If a node's usage is below the threshold for all resources (cpu, memory, number of pods, and extended resources), the node is considered underutilized.
Currently, pods' resource requests are used to compute node resource utilization.
Any node above `thresholds` is considered appropriately utilized and is not considered for eviction.

The `thresholds` param can be tuned as per your cluster requirements. Note that this
strategy evicts pods from `underutilized nodes` (those with usage below `thresholds`)
so that they can be recreated on appropriately utilized nodes.
The strategy aborts if the number of `underutilized nodes` or `appropriately utilized nodes` is zero.

**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.

**Parameters:**

|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`numberOfNodes`|int|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|`Namespaces`|(see [namespace filtering](#namespace-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "HighNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
```

Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`. If any of these resource types is not specified, all its thresholds default to 100%.
* Extended resources are supported. For example, the resource type `nvidia.com/gpu` can be specified for GPU node utilization. Extended resources are optional and will not be used to compute a node's usage unless they are specified in `thresholds` explicitly.
* `thresholds` cannot be nil.
* The valid range of a resource's percentage value is \[0, 100\].

There is another parameter associated with the `HighNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.

### RemovePodsViolatingInterPodAntiAffinity

This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example,
if there is podA on a node, and podB and podC (running on the same node) have anti-affinity rules that prohibit
them from running on the same node, then podA will be evicted from the node so that podB and podC can run. This
situation can arise when the anti-affinity rules for podB and podC are created while all three pods are already
running on the node.

**Parameters:**

|Name|Type|
|---|---|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: true
```

### RemovePodsViolatingNodeAffinity

This strategy makes sure all pods violating
[node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)
are eventually removed from nodes. Node affinity rules allow a pod to specify the
`requiredDuringSchedulingIgnoredDuringExecution` type, which tells the scheduler
to respect node affinity when scheduling the pod, but tells the kubelet to ignore it
in case the node changes over time and no longer satisfies the affinity.
When enabled, the strategy serves as a temporary implementation
of `requiredDuringSchedulingRequiredDuringExecution`, evicting pods on behalf of the kubelet
when their node no longer satisfies their node affinity.

For example, there is podA scheduled on nodeA, which satisfied the node
affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time
of scheduling. Over time, nodeA stops satisfying the rule. When the strategy is
executed and there is another node available that satisfies the node affinity rule,
podA gets evicted from nodeA.

**Parameters:**

|Name|Type|
|---|---|
|`nodeAffinityType`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
```

### RemovePodsViolatingNodeTaints

This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example, suppose a
pod "podA" with a toleration for the taint ``key=value:NoSchedule`` is scheduled and running on the tainted
node. If the node's taint is subsequently updated or removed, the taint is no longer satisfied by the pod's
toleration and the pod will be evicted.

Node taints can be excluded from consideration by specifying a list of `excludedTaints`. If a node taint key **or**
key=value matches an `excludedTaints` entry, the taint is ignored.

For example, the excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value;
the entry "dedicated=special-user" would match only taints with key "dedicated" and value "special-user".

**Parameters:**

|Name|Type|
|---|---|
|`excludedTaints`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
    params:
      excludedTaints:
      - dedicated=special-user # exclude taints with key "dedicated" and value "special-user"
      - reserved # exclude all taints with key "reserved"
```

### RemovePodsViolatingTopologySpreadConstraint

This strategy makes sure that pods violating [topology spread constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.

By default, this strategy only deals with hard constraints; setting the `includeSoftConstraints` parameter to `true` will
include soft constraints as well.

The strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.

**Parameters:**

|Name|Type|
|---|---|
|`includeSoftConstraints`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingTopologySpreadConstraint":
    enabled: true
    params:
      includeSoftConstraints: false
```

### RemovePodsHavingTooManyRestarts

This strategy makes sure that pods having too many restarts are removed from nodes. For example, a pod whose
EBS/PD volume cannot be attached to its instance will keep restarting and should be re-scheduled to another node. Its parameters
include `podRestartThreshold`, which is the number of restarts (summed over all eligible containers) at which a pod
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
into that calculation.

**Parameters:**

|Name|Type|
|---|---|
|`podRestartThreshold`|int|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        podRestartThreshold: 100
        includingInitContainers: true
```

### PodLifeTime

This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.

You can also specify the `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`

If a value for `states` or `podStatusPhases` is not specified,
pods in any state (even `Running`) are considered for eviction.

**Parameters:**

|Name|Type|Notes|
|---|---|---|
|`maxPodLifeTimeSeconds`|int||
|`podStatusPhases`|list(string)|Deprecated in v0.25+. Use `states` instead|
|`states`|list(string)|Only supported in v0.25+|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))||
|`namespaces`|(see [namespace filtering](#namespace-filtering))||
|`labelSelector`|(see [label filtering](#label-filtering))||

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
        states:
        - "Pending"
        - "PodInitializing"
```

### RemoveFailedPods

This strategy evicts pods that are in the `Failed` status phase.
You can provide an optional parameter to filter by failed `reasons`.
`reasons` can be expanded to include the reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
You can specify the optional parameter `minPodLifetimeSeconds` to evict only pods that are older than the specified number of seconds.
Lastly, you can specify the optional parameter `excludeOwnerKinds`; if a pod
has any of these `Kind`s listed as an `OwnerRef`, it will not be considered for eviction.

**Parameters:**

|Name|Type|
|---|---|
|`minPodLifetimeSeconds`|uint|
|`excludeOwnerKinds`|list(string)|
|`reasons`|list(string)|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveFailedPods":
    enabled: true
    params:
      failedPods:
        reasons:
        - "NodeAffinity"
        includingInitContainers: true
        excludeOwnerKinds:
        - "Job"
        minPodLifetimeSeconds: 3600
```

## Filter Pods

### Namespace filtering

The following strategies accept a `namespaces` parameter which allows specifying a list of namespaces to include or, respectively, exclude:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemoveDuplicates`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
* `LowNodeUtilization` and `HighNodeUtilization` (only filtered right before eviction)

For example:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      namespaces:
        include:
        - "namespace1"
        - "namespace2"
```

In the example above, `PodLifeTime` is executed only over `namespace1` and `namespace2`.
The same holds for the `exclude` field:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      namespaces:
        exclude:
        - "namespace1"
        - "namespace2"
```

Here the strategy is executed over all namespaces except `namespace1` and `namespace2`.

It is not allowed to combine the `include` and `exclude` fields.

### Priority filtering

All strategies can configure a priority threshold; only pods with a priority below the threshold can be evicted. You can
specify this threshold by setting `thresholdPriorityClassName` (setting the threshold to the value of the given
priority class) or `thresholdPriority` (directly setting the threshold). By default, this threshold
is set to the value of the `system-cluster-critical` priority class.

Note: Setting `evictSystemCriticalPods` to true disables priority filtering entirely.

E.g.

Setting `thresholdPriority`
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      thresholdPriority: 10000
```

Setting `thresholdPriorityClassName`
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      thresholdPriorityClassName: "priorityclass1"
```

Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`. If the given priority class
does not exist, the descheduler won't create it and will throw an error.
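
For reference, a `PriorityClass` whose `value` becomes the eviction threshold when referenced through `thresholdPriorityClassName` (the name and value below are illustrative and match the example above):

```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: priorityclass1
value: 10000           # pods at or above this priority are protected from eviction
globalDefault: false
description: "Priority class used as the descheduler eviction threshold."
```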

### Label filtering

The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#labelselector-v1-meta)
to filter pods by their labels:

* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`

This restricts a strategy to run only among the pods the descheduler is interested in.

For example:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      labelSelector:
        matchLabels:
          component: redis
        matchExpressions:
        - {key: tier, operator: In, values: [cache]}
        - {key: environment, operator: NotIn, values: [dev]}
```

### Node Fit filtering

The following strategies accept a `nodeFit` boolean parameter which can optimize descheduling:
* `RemoveDuplicates`
* `LowNodeUtilization`
* `HighNodeUtilization`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemovePodsHavingTooManyRestarts`
* `RemoveFailedPods`

If set to `true`, the descheduler will consider whether the pods that meet the eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently, the following criteria are considered when `nodeFit` is set to `true`:
- A `nodeSelector` on the pod
- Any `tolerations` on the pod and any `taints` on the other nodes
- `nodeAffinity` on the pod
- Resource `requests` made by the pod and the resources available on other nodes
- Whether any of the other nodes are marked as `unschedulable`

E.g.

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeFit: true
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50
```

Note that node fit filtering references the current pod spec, and not that of its owner.
Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
This is expected behavior, as the descheduler is a "best-effort" mechanism.

Using Deployments instead of ReplicationControllers provides an automated rollout of pod spec changes, therefore ensuring that the descheduler has an up-to-date view of the cluster state.

@@ -4,24 +4,13 @@ Starting with descheduler release v0.10.0 container images are available in the

Descheduler Version | Container Image | Architectures |
------------------- |-------------------------------------------------|-------------------------|
v0.32.2 | registry.k8s.io/descheduler/descheduler:v0.32.2 | AMD64<br>ARM64<br>ARMv7 |
v0.32.1 | registry.k8s.io/descheduler/descheduler:v0.32.1 | AMD64<br>ARM64<br>ARMv7 |
v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
v0.27.1 | registry.k8s.io/descheduler/descheduler:v0.27.1 | AMD64<br>ARM64<br>ARMv7 |
v0.27.0 | registry.k8s.io/descheduler/descheduler:v0.27.0 | AMD64<br>ARM64<br>ARMv7 |
v0.26.1 | registry.k8s.io/descheduler/descheduler:v0.26.1 | AMD64<br>ARM64<br>ARMv7 |
v0.26.0 | registry.k8s.io/descheduler/descheduler:v0.26.0 | AMD64<br>ARM64<br>ARMv7 |
v0.25.1 | registry.k8s.io/descheduler/descheduler:v0.25.1 | AMD64<br>ARM64<br>ARMv7 |
v0.25.0 | registry.k8s.io/descheduler/descheduler:v0.25.0 | AMD64<br>ARM64<br>ARMv7 |
v0.24.1 | registry.k8s.io/descheduler/descheduler:v0.24.1 | AMD64<br>ARM64<br>ARMv7 |
v0.24.0 | registry.k8s.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
v0.23.1 | registry.k8s.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
v0.22.0 | registry.k8s.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
v0.21.0 | registry.k8s.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
v0.20.0 | registry.k8s.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
v0.19.0 | registry.k8s.io/descheduler/descheduler:v0.19.0 | AMD64 |
v0.18.0 | registry.k8s.io/descheduler/descheduler:v0.18.0 | AMD64 |
v0.10.0 | registry.k8s.io/descheduler/descheduler:v0.10.0 | AMD64 |

Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore,
starting with descheduler release v0.20.0, use the below process to download the official descheduler

go.mod
@@ -1,125 +1,131 @@
|
||||
module sigs.k8s.io/descheduler
|
||||
|
||||
go 1.21
|
||||
go 1.23.3
|
||||
|
||||
require (
|
||||
github.com/client9/misspell v0.3.4
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
go.opentelemetry.io/otel v1.21.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
|
||||
go.opentelemetry.io/otel/sdk v1.21.0
|
||||
go.opentelemetry.io/otel/trace v1.21.0
|
||||
google.golang.org/grpc v1.60.1
|
||||
k8s.io/api v0.29.0
|
||||
k8s.io/apimachinery v0.29.0
|
||||
k8s.io/apiserver v0.29.0
|
||||
k8s.io/client-go v0.29.0
|
||||
k8s.io/code-generator v0.29.0
|
||||
k8s.io/component-base v0.29.0
|
||||
k8s.io/component-helpers v0.29.0
|
||||
k8s.io/klog/v2 v2.110.1
|
||||
k8s.io/utils v0.0.0-20231127182322-b307cd553661
|
||||
go.opentelemetry.io/otel v1.28.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
|
||||
go.opentelemetry.io/otel/sdk v1.28.0
|
||||
go.opentelemetry.io/otel/trace v1.28.0
|
||||
google.golang.org/grpc v1.65.0
|
||||
k8s.io/api v0.32.0
|
||||
k8s.io/apimachinery v0.32.0
|
||||
k8s.io/apiserver v0.32.0
|
||||
k8s.io/client-go v0.32.0
|
||||
k8s.io/code-generator v0.32.0
|
||||
k8s.io/component-base v0.32.0
|
||||
k8s.io/component-helpers v0.32.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/metrics v0.32.0
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758
|
||||
kubevirt.io/api v1.3.0
|
||||
kubevirt.io/client-go v1.3.0
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
|
||||
sigs.k8s.io/mdtoc v1.1.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.18.0 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-kit/kit v0.13.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/glog v1.2.1 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
github.com/google/cel-go v0.17.7 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/openshift/custom-resource-status v1.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.10 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
go.etcd.io/etcd/client/v3 v3.5.10 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.etcd.io/etcd/api/v3 v3.5.16 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
go.etcd.io/etcd/client/v3 v3.5.16 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.16.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/oauth2 v0.13.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.16.1 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/protobuf v1.31.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
k8s.io/kms v0.29.0 // indirect
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
k8s.io/apiextensions-apiserver v0.30.0 // indirect
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
k8s.io/kms v0.32.0 // indirect
k8s.io/kube-openapi v0.30.0 // indirect
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
)

replace (
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f

// CVE-2023-48795: https://github.com/kubernetes-sigs/descheduler/security/code-scanning/17
// TODO: remove this replace once the upstream dependency is updated to v0.29.1
k8s.io/api => k8s.io/api v0.0.0-20231220172311-84c476802242
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20231220171733-60eaa653342b
k8s.io/client-go => k8s.io/client-go v0.0.0-20231220173006-5a0a4247921d
)

replace golang.org/x/net => golang.org/x/net v0.33.0

replace golang.org/x/crypto => golang.org/x/crypto v0.31.0
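The replace directives above are how this go.mod pins patched versions, e.g. the k8s.io pins and the golang.org/x/net bump referenced by the CVE-2023-48795 comment. As an illustrative sketch only (not part of this repository), a minimal Go program can list the replace directives of a go.mod with the golang.org/x/mod/modfile package, which is one way to audit such pins; the "go.mod" path is an assumption.

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/mod/modfile" // parser for go.mod syntax
)

func main() {
	// Assumes a go.mod file in the current directory (hypothetical path).
	data, err := os.ReadFile("go.mod")
	if err != nil {
		log.Fatal(err)
	}
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Print each replace directive as "old old-version => new new-version".
	for _, r := range f.Replace {
		fmt.Printf("%s %s => %s %s\n", r.Old.Path, r.Old.Version, r.New.Path, r.New.Version)
	}
}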
go.sum
@@ -1,92 +1,147 @@
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -94,10 +149,15 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
@@ -106,16 +166,18 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -129,246 +191,433 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs=
github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4=
github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
|
||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
|
||||
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
|
||||
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
|
||||
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
|
||||
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.0.0-20231220172311-84c476802242 h1:0vL/RpEDBpP1Ib7D/+Gn1GcXRKoLfzHk39Bimd2kBNI=
|
||||
k8s.io/api v0.0.0-20231220172311-84c476802242/go.mod h1:77+m1u5YUTiZfSm1ad6sO9fCphn5EYI5cFzfE0VKOuY=
|
||||
k8s.io/apimachinery v0.0.0-20231220171733-60eaa653342b h1:Z+R5mA4fTuumBjSg84ZIY+W8Gi3zW50/iw58LL1tUwQ=
|
||||
k8s.io/apimachinery v0.0.0-20231220171733-60eaa653342b/go.mod h1:HcS5UNnaHsno0i/Q7QQQzqFd5WS+W26bysH+Ez3FLxA=
|
||||
k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o=
|
||||
k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM=
|
||||
k8s.io/client-go v0.0.0-20231220173006-5a0a4247921d h1:Y5RdiizB/2/3NM2YcasfigRdLYEkSa0AslQLcUQGDMw=
|
||||
k8s.io/client-go v0.0.0-20231220173006-5a0a4247921d/go.mod h1:1ou2dbm4LO33nvBBLNRvuzwYldF1e1DAjhtXzRCjF6o=
|
||||
k8s.io/code-generator v0.29.0 h1:2LQfayGDhaIlaamXjIjEQlCMy4JNCH9lrzas4DNW1GQ=
|
||||
k8s.io/code-generator v0.29.0/go.mod h1:5bqIZoCxs2zTRKMWNYqyQWW/bajc+ah4rh0tMY8zdGA=
|
||||
k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s=
|
||||
k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M=
|
||||
k8s.io/component-helpers v0.29.0 h1:Y8W70NGeitKxWwhsPo/vEQbQx5VqJV+3xfLpP3V1VxU=
|
||||
k8s.io/component-helpers v0.29.0/go.mod h1:j2coxVfmzTOXWSE6sta0MTgNSr572Dcx68F6DD+8fWc=
|
||||
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks=
|
||||
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
|
||||
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
|
||||
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
|
||||
k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs=
|
||||
k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y=
|
||||
k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
|
||||
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs=
|
||||
k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag=
|
||||
k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
|
||||
k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
|
||||
k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
||||
k8s.io/code-generator v0.32.0 h1:s0lNN8VSWny8LBz5t5iy7MCdgwdOhdg7vAGVxvS+VWU=
|
||||
k8s.io/code-generator v0.32.0/go.mod h1:b7Q7KMZkvsYFy72A79QYjiv4aTz3GvW0f1T3UfhFq4s=
|
||||
k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU=
|
||||
k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM=
|
||||
k8s.io/component-helpers v0.32.0 h1:pQEEBmRt3pDJJX98cQvZshDgJFeKRM4YtYkMmfOlczw=
|
||||
k8s.io/component-helpers v0.32.0/go.mod h1:9RuClQatbClcokXOcDWSzFKQm1huIf0FzQlPRpizlMc=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
|
||||
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
|
||||
k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI=
|
||||
k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA=
|
||||
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e h1:snPmy96t93RredGRjKfMFt+gvxuVAncqSAyBveJtr4Q=
|
||||
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
|
||||
k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
|
||||
k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.32.0 h1:jwOfunHIrcdYl5FRcA+uUKKtg6qiqoPCwmS2T3XTYL4=
|
||||
k8s.io/kms v0.32.0/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
|
||||
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM=
|
||||
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro=
|
||||
k8s.io/metrics v0.32.0 h1:70qJ3ZS/9DrtH0UA0NVBI6gW2ip2GAn9e7NtoKERpns=
|
||||
k8s.io/metrics v0.32.0/go.mod h1:skdg9pDjVjCPIQqmc5rBzDL4noY64ORhKu9KCPv1+QI=
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
kubevirt.io/api v1.3.0 h1:9sGElMmnRU50pGED+MPPD2OwQl4S5lvjCUjm+t0mI90=
|
||||
kubevirt.io/api v1.3.0/go.mod h1:e6LkElYZZm8NcP2gKlFVHZS9pgNhIARHIjSBSfeiP1s=
|
||||
kubevirt.io/client-go v1.3.0 h1:/HKn4exzwsctEVTwVtEFaeT9D2v4TgWr2SmxITVEZ/4=
|
||||
kubevirt.io/client-go v1.3.0/go.mod h1:qmcJZvUjbmggY1pp7irO3zesBJj7wwGIWAdnYEoh3yc=
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1 h1:chmxuINvA7TPmIe8LpShCoKPxoegcKjkG9tYboFBs/U=
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs=
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc=
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
|
||||
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
||||
@@ -20,6 +20,6 @@ function find_dirs_containing_comment_tags() {
    | LC_ALL=C sort -u \
  )

  IFS=",";
  IFS=" ";
  printf '%s' "${array[*]}";
}

26 hack/lib/go.sh Normal file
@@ -0,0 +1,26 @@
#!/bin/bash

# Copyright 2024 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# go::verify_version verifies the go version is supported by the project.
# descheduler actively supports 3 versions, therefore 3 go versions are supported.
go::verify_version() {
  GO_VERSION=($(go version))

  if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.21|go1.22|go1.23') ]]; then
    echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
    exit 1
  fi
}
@@ -6,5 +6,5 @@ go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/con

${OS_OUTPUT_BINPATH}/conversion-gen \
  --go-header-file "hack/boilerplate/boilerplate.go.txt" \
  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")" \
  --output-file-base zz_generated.conversion
  --output-file zz_generated.conversion.go \
  $(find_dirs_containing_comment_tags "+k8s:conversion-gen=")
@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepc

${OS_OUTPUT_BINPATH}/deepcopy-gen \
  --go-header-file "hack/boilerplate/boilerplate.go.txt" \
  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")" \
  --output-file-base zz_generated.deepcopy
  --output-file zz_generated.deepcopy.go \
  $(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")

@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defa

${OS_OUTPUT_BINPATH}/defaulter-gen \
  --go-header-file "hack/boilerplate/boilerplate.go.txt" \
  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")" \
  --extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
  --output-file-base zz_generated.defaults
  --extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha2" \
  --output-file zz_generated.defaults.go \
  $(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")
@@ -20,13 +20,9 @@ set -o nounset
set -o pipefail

DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${DESCHEDULER_ROOT}/hack/lib/go.sh"

GO_VERSION=($(go version))

if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21') ]]; then
  echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
  exit 1
fi
go::verify_version

cd "${DESCHEDULER_ROOT}"

@@ -20,13 +20,9 @@ set -o nounset
set -o pipefail

DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${DESCHEDULER_ROOT}/hack/lib/go.sh"

GO_VERSION=($(go version))

if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21') ]]; then
  echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
  exit 1
fi
go::verify_version

cd "${DESCHEDULER_ROOT}"

@@ -70,7 +70,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
  ret=1
fi

if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" -x "README*" vendor "${_deschedulertmp}/vendor")"; then
  echo "Your vendored results are different:" >&2
  echo "${_out}" >&2
  echo "Vendor Verify failed." >&2
1147 keps/1397-evictions-in-background/README.md Normal file
File diff suppressed because it is too large.

16 keps/1397-evictions-in-background/kep.yaml Normal file
@@ -0,0 +1,16 @@
title: descheduler integration with evacuation API as an alternative to eviction API
kep-number: 1397
authors:
  - "@ingvagabund"
owning-sig: sig-scheduling
participating-sigs:
  - sig-apps
status: provisional
creation-date: 2024-04-14
reviewers:
  - atiratree
approvers:
  - TBD
feature-gates:
  - TBD
stage: alpha
1211 keps/753-descheduling-framework/README.md Normal file
File diff suppressed because it is too large.

BIN keps/753-descheduling-framework/framework-workflow-diagram.png Normal file
Binary file not shown (48 KiB).

29 keps/753-descheduling-framework/kep.yaml Normal file
@@ -0,0 +1,29 @@
title: Descheduling framework
kep-number: 753
authors:
  - "@ingvagabund"
  - "@damemi"
owning-sig: sig-scheduling
status: provisional
creation-date: 2024-04-08
reviewers:
  - "@ingvagabund"
  - "@damemi"
  - "@a7i"
  - "@knelasevero"
approvers:
  - "@ingvagabund"
  - "@damemi"
  - "@a7i"
  - "@knelasevero"
#replaces:

# The target maturity stage in the current dev cycle for this KEP.
stage: alpha

# The most recent milestone for which work toward delivery of this KEP has been
# done. This can be the current (upcoming) milestone, if it is being actively
# worked on.
# latest-milestone: "v1.19"

# disable-supported: true
@@ -22,13 +22,19 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
  resources: ["priorityclasses"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
  resources: ["poddisruptionbudgets"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["create"]
  verbs: ["create", "update"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  resourceNames: ["descheduler"]
  verbs: ["get", "patch", "delete"]
- apiGroups: ["metrics.k8s.io"]
  resources: ["nodes", "pods"]
  verbs: ["get", "list"]
---
apiVersion: v1
kind: ServiceAccount
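The widened lease verbs match the create-then-renew lifecycle of client-go leader election: the elector creates the Lease when it is missing and then repeatedly gets and updates (or patches) it to renew its claim, while the resourceNames-scoped rule keeps the broader verbs confined to the single "descheduler" Lease. A minimal sketch of that lifecycle, assuming client-go's leaderelection package; the lease name, namespace, identity, and timings below are illustrative, not taken from this manifest:

	package main

	import (
		"context"
		"time"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/rest"
		"k8s.io/client-go/tools/leaderelection"
		"k8s.io/client-go/tools/leaderelection/resourcelock"
	)

	// runWithLeaderElection wraps run so that only the elected replica executes it.
	func runWithLeaderElection(ctx context.Context, run func(context.Context)) error {
		cfg, err := rest.InClusterConfig()
		if err != nil {
			return err
		}
		client := kubernetes.NewForConfigOrDie(cfg)

		// The elector creates the Lease if absent ("create") and renews it
		// periodically ("get"/"update"), which is what the expanded verbs above allow.
		lock := &resourcelock.LeaseLock{
			LeaseMeta:  metav1.ObjectMeta{Name: "descheduler", Namespace: "kube-system"}, // illustrative
			Client:     client.CoordinationV1(),
			LockConfig: resourcelock.ResourceLockConfig{Identity: "descheduler-pod-1"}, // illustrative
		}
		leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
			Lock:          lock,
			LeaseDuration: 15 * time.Second,
			RenewDeadline: 10 * time.Second,
			RetryPeriod:   2 * time.Second,
			Callbacks: leaderelection.LeaderCallbacks{
				OnStartedLeading: run,
				OnStoppedLeading: func() {},
			},
		})
		return nil
	}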
@@ -16,7 +16,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.29.0
          image: registry.k8s.io/descheduler/descheduler:v0.32.2
          volumeMounts:
            - mountPath: /policy-dir
              name: policy-volume

@@ -19,7 +19,7 @@ spec:
      serviceAccountName: descheduler-sa
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.29.0
          image: registry.k8s.io/descheduler/descheduler:v0.32.2
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/descheduler"

@@ -14,7 +14,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.29.0
          image: registry.k8s.io/descheduler/descheduler:v0.32.2
          volumeMounts:
            - mountPath: /policy-dir
              name: policy-volume
@@ -36,7 +36,7 @@ var (
			Name:           "pods_evicted",
			Help:           "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
			StabilityLevel: metrics.ALPHA,
		}, []string{"result", "strategy", "namespace", "node"})
		}, []string{"result", "strategy", "profile", "namespace", "node"})

	buildInfo = metrics.NewGauge(
		&metrics.GaugeOpts{
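Adding a label to a counter vector changes the write-side arity too: every declared label must be supplied on each observation, so all writers have to be updated together with this change. A sketch of a recording helper, assuming the vector above is exported as PodsEvicted (the hunk does not show the variable name) and that callers thread the owning profile through:

	package metrics

	import (
		v1 "k8s.io/api/core/v1"
	)

	// recordEviction is an illustrative helper, not descheduler's actual call
	// site: it supplies all five labels, including the new "profile" label,
	// for one evicted (or failed-to-evict) pod.
	func recordEviction(pod *v1.Pod, strategy, profile, result string) {
		PodsEvicted.With(map[string]string{
			"result":    result,
			"strategy":  strategy,
			"profile":   profile,
			"namespace": pod.Namespace,
			"node":      pod.Spec.NodeName,
		}).Inc()
	}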
@@ -38,13 +38,23 @@ type DeschedulerPolicy struct {

	// MaxNoOfPodsToEvictPerNamespace restricts the maximum number of pods to be evicted per namespace.
	MaxNoOfPodsToEvictPerNamespace *uint

	// MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
	MaxNoOfPodsToEvictTotal *uint

	// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
	// Default is false.
	EvictionFailureEventNotification *bool

	// MetricsCollector configures collection of metrics about actual resource utilization
	MetricsCollector MetricsCollector
}

// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable
type Namespaces struct {
	Include []string `json:"include"`
	Exclude []string `json:"exclude"`
	Include []string `json:"include,omitempty"`
	Exclude []string `json:"exclude,omitempty"`
}

type (
@@ -81,3 +91,10 @@ type PluginSet struct {
	Enabled  []string
	Disabled []string
}

// MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct {
	// Enabled enables metrics collection from kubernetes metrics.
	// Later, the collection can be extended to other providers.
	Enabled bool
}
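Taken together, the new knobs compose on the internal policy type like this; a minimal sketch assuming the k8s.io/utils/ptr helper is available in this tree, with illustrative values only:

	package main

	import (
		"k8s.io/utils/ptr"
		"sigs.k8s.io/descheduler/pkg/api"
	)

	// buildPolicy shows the new fields side by side; the limits and flags are
	// example values, not recommendations from this commit.
	func buildPolicy() api.DeschedulerPolicy {
		return api.DeschedulerPolicy{
			MaxNoOfPodsToEvictPerNamespace:   ptr.To[uint](10),
			MaxNoOfPodsToEvictTotal:          ptr.To[uint](100),
			EvictionFailureEventNotification: ptr.To(true),
			MetricsCollector:                 api.MetricsCollector{Enabled: true},
		}
	}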
@@ -1,271 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

var (
	// pluginArgConversionScheme is a scheme with internal and v1alpha2 registered,
	// used for defaulting/converting typed PluginConfig Args.
	// Access via getPluginArgConversionScheme()

	Scheme = runtime.NewScheme()
	Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict)
)

// evictorImpl implements the Evictor interface so plugins
// can evict a pod without importing a specific pod evictor
type evictorImpl struct {
	podEvictor    *evictions.PodEvictor
	evictorFilter frameworktypes.EvictorPlugin
}

var _ frameworktypes.Evictor = &evictorImpl{}

// Filter checks if a pod can be evicted
func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
	return ei.evictorFilter.Filter(pod)
}

// PreEvictionFilter checks if pod can be evicted right before eviction
func (ei *evictorImpl) PreEvictionFilter(pod *v1.Pod) bool {
	return ei.evictorFilter.PreEvictionFilter(pod)
}

// Evict evicts a pod (no pre-check performed)
func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
	return ei.podEvictor.EvictPod(ctx, pod, opts)
}

func (ei *evictorImpl) NodeLimitExceeded(node *v1.Node) bool {
	return ei.podEvictor.NodeLimitExceeded(node)
}

// handleImpl implements the framework handle which gets passed to plugins
type handleImpl struct {
	clientSet                 clientset.Interface
	getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
	sharedInformerFactory     informers.SharedInformerFactory
	evictor                   *evictorImpl
}

var _ frameworktypes.Handle = &handleImpl{}

// ClientSet retrieves kube client set
func (hi *handleImpl) ClientSet() clientset.Interface {
	return hi.clientSet
}

// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
	return hi.getPodsAssignedToNodeFunc
}

// SharedInformerFactory retrieves shared informer factory
func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
	return hi.sharedInformerFactory
}

// Evictor retrieves evictor so plugins can filter and evict pods
func (hi *handleImpl) Evictor() frameworktypes.Evictor {
	return hi.evictor
}

func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
	err := V1alpha1ToInternal(in, pluginregistry.PluginRegistry, out, s)
	if err != nil {
		return err
	}
	return nil
}

func V1alpha1ToInternal(
	deschedulerPolicy *DeschedulerPolicy,
	registry pluginregistry.Registry,
	out *api.DeschedulerPolicy,
	s conversion.Scope,
) error {
	var evictLocalStoragePods bool
	if deschedulerPolicy.EvictLocalStoragePods != nil {
		evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
	}

	evictBarePods := false
	if deschedulerPolicy.EvictFailedBarePods != nil {
		evictBarePods = *deschedulerPolicy.EvictFailedBarePods
		if evictBarePods {
			klog.V(1).Info("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
		}
	}

	evictSystemCriticalPods := false
	if deschedulerPolicy.EvictSystemCriticalPods != nil {
		evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
		if evictSystemCriticalPods {
			klog.V(1).Info("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
		}
	}

	ignorePvcPods := false
	if deschedulerPolicy.IgnorePVCPods != nil {
		ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
	}

	var profiles []api.DeschedulerProfile

	// Build profiles
	for name, strategy := range deschedulerPolicy.Strategies {
		if _, ok := pluginregistry.PluginRegistry[string(name)]; ok {
			if strategy.Enabled {
				params := strategy.Params
				if params == nil {
					params = &StrategyParameters{}
				}

				nodeFit := false
				if name != "PodLifeTime" {
					nodeFit = params.NodeFit
				}

				if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
					klog.ErrorS(fmt.Errorf("priority threshold misconfigured"), "only one of priorityThreshold fields can be set", "pluginName", name)
					return fmt.Errorf("priority threshold misconfigured for plugin %v", name)
				}

				var priorityThreshold *api.PriorityThreshold
				if strategy.Params != nil {
					priorityThreshold = &api.PriorityThreshold{
						Value: strategy.Params.ThresholdPriority,
						Name:  strategy.Params.ThresholdPriorityClassName,
					}
				}

				var pluginConfig *api.PluginConfig
				var err error
				if pcFnc, exists := StrategyParamsToPluginArgs[string(name)]; exists {
					pluginConfig, err = pcFnc(params)
					if err != nil {
						klog.ErrorS(err, "skipping strategy", "strategy", name)
						return fmt.Errorf("failed to get plugin config for strategy %v: %v", name, err)
					}
				} else {
					klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
					return fmt.Errorf("unknown strategy name: %v", name)
				}

				profile := api.DeschedulerProfile{
					Name: fmt.Sprintf("strategy-%v-profile", name),
					PluginConfigs: []api.PluginConfig{
						{
							Name: defaultevictor.PluginName,
							Args: &defaultevictor.DefaultEvictorArgs{
								EvictLocalStoragePods:   evictLocalStoragePods,
								EvictSystemCriticalPods: evictSystemCriticalPods,
								IgnorePvcPods:           ignorePvcPods,
								EvictFailedBarePods:     evictBarePods,
								NodeFit:                 nodeFit,
								PriorityThreshold:       priorityThreshold,
							},
						},
						*pluginConfig,
					},
					Plugins: api.Plugins{
						Filter: api.PluginSet{
							Enabled: []string{defaultevictor.PluginName},
						},
						PreEvictionFilter: api.PluginSet{
							Enabled: []string{defaultevictor.PluginName},
						},
					},
				}

				pluginArgs := registry[string(name)].PluginArgInstance
				pluginInstance, err := registry[string(name)].PluginBuilder(pluginArgs, &handleImpl{})
				if err != nil {
					klog.ErrorS(fmt.Errorf("could not build plugin"), "plugin build error", "plugin", name)
					return fmt.Errorf("could not build plugin: %v", name)
				}

				// pluginInstance can be of any of each type, or both
				profilePlugins := profile.Plugins
				profile.Plugins = enableProfilePluginsByType(profilePlugins, pluginInstance, pluginConfig)
				profiles = append(profiles, profile)
			}
		} else {
			klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
			return fmt.Errorf("unknown strategy name: %v", name)
		}
	}

	out.Profiles = profiles
	out.NodeSelector = deschedulerPolicy.NodeSelector
	out.MaxNoOfPodsToEvictPerNamespace = deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace
	out.MaxNoOfPodsToEvictPerNode = deschedulerPolicy.MaxNoOfPodsToEvictPerNode

	return nil
}

func enableProfilePluginsByType(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
	profilePlugins = checkBalance(profilePlugins, pluginInstance, pluginConfig)
	profilePlugins = checkDeschedule(profilePlugins, pluginInstance, pluginConfig)
	return profilePlugins
}

func checkBalance(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
	_, ok := pluginInstance.(frameworktypes.BalancePlugin)
	if ok {
		klog.V(3).Infof("converting Balance plugin: %s", pluginInstance.Name())
		profilePlugins.Balance.Enabled = []string{pluginConfig.Name}
	}
	return profilePlugins
}

func checkDeschedule(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
	_, ok := pluginInstance.(frameworktypes.DeschedulePlugin)
	if ok {
		klog.V(3).Infof("converting Deschedule plugin: %s", pluginInstance.Name())
		profilePlugins.Deschedule.Enabled = []string{pluginConfig.Name}
	}
	return profilePlugins
}

// Register Conversions
func RegisterConversions(s *runtime.Scheme) error {
	if err := s.AddGeneratedConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
	}); err != nil {
		return err
	}
	return nil
}
@@ -1,256 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	utilpointer "k8s.io/utils/pointer"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)

// Once all strategies are migrated the arguments get read from the configuration file
// without any wiring. Keeping the wiring here so the descheduler can still use
// the v1alpha1 configuration during the strategy migration to plugins.

var StrategyParamsToPluginArgs = map[string]func(params *StrategyParameters) (*api.PluginConfig, error){
	"RemovePodsViolatingNodeTaints": func(params *StrategyParameters) (*api.PluginConfig, error) {
		args := &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
			Namespaces:              v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:           params.LabelSelector,
			IncludePreferNoSchedule: params.IncludePreferNoSchedule,
			ExcludedTaints:          params.ExcludedTaints,
		}
		if err := removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodetaints.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingnodetaints.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removepodsviolatingnodetaints.PluginName,
			Args: args,
		}, nil
	},
	"RemoveFailedPods": func(params *StrategyParameters) (*api.PluginConfig, error) {
		failedPodsParams := params.FailedPods
		if failedPodsParams == nil {
			failedPodsParams = &FailedPods{}
		}
		args := &removefailedpods.RemoveFailedPodsArgs{
			Namespaces:              v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:           params.LabelSelector,
			IncludingInitContainers: failedPodsParams.IncludingInitContainers,
			MinPodLifetimeSeconds:   failedPodsParams.MinPodLifetimeSeconds,
			ExcludeOwnerKinds:       failedPodsParams.ExcludeOwnerKinds,
			Reasons:                 failedPodsParams.Reasons,
		}
		if err := removefailedpods.ValidateRemoveFailedPodsArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removefailedpods.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removefailedpods.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removefailedpods.PluginName,
			Args: args,
		}, nil
	},
	"RemovePodsViolatingNodeAffinity": func(params *StrategyParameters) (*api.PluginConfig, error) {
		args := &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
			Namespaces:       v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:    params.LabelSelector,
			NodeAffinityType: params.NodeAffinityType,
		}
		if err := removepodsviolatingnodeaffinity.ValidateRemovePodsViolatingNodeAffinityArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodeaffinity.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingnodeaffinity.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removepodsviolatingnodeaffinity.PluginName,
			Args: args,
		}, nil
	},
	"RemovePodsViolatingInterPodAntiAffinity": func(params *StrategyParameters) (*api.PluginConfig, error) {
		args := &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{
			Namespaces:    v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector: params.LabelSelector,
		}
		if err := removepodsviolatinginterpodantiaffinity.ValidateRemovePodsViolatingInterPodAntiAffinityArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatinginterpodantiaffinity.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatinginterpodantiaffinity.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removepodsviolatinginterpodantiaffinity.PluginName,
			Args: args,
		}, nil
	},
	"RemovePodsHavingTooManyRestarts": func(params *StrategyParameters) (*api.PluginConfig, error) {
		tooManyRestartsParams := params.PodsHavingTooManyRestarts
		if tooManyRestartsParams == nil {
			tooManyRestartsParams = &PodsHavingTooManyRestarts{}
		}
		args := &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
			Namespaces:              v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:           params.LabelSelector,
			PodRestartThreshold:     tooManyRestartsParams.PodRestartThreshold,
			IncludingInitContainers: tooManyRestartsParams.IncludingInitContainers,
		}
		if err := removepodshavingtoomanyrestarts.ValidateRemovePodsHavingTooManyRestartsArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodshavingtoomanyrestarts.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodshavingtoomanyrestarts.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removepodshavingtoomanyrestarts.PluginName,
			Args: args,
		}, nil
	},
	"PodLifeTime": func(params *StrategyParameters) (*api.PluginConfig, error) {
		podLifeTimeParams := params.PodLifeTime
		if podLifeTimeParams == nil {
			podLifeTimeParams = &PodLifeTime{}
		}

		var states []string
		if podLifeTimeParams.PodStatusPhases != nil {
			states = append(states, podLifeTimeParams.PodStatusPhases...)
		}
		if podLifeTimeParams.States != nil {
			states = append(states, podLifeTimeParams.States...)
		}

		args := &podlifetime.PodLifeTimeArgs{
			Namespaces:            v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:         params.LabelSelector,
			MaxPodLifeTimeSeconds: podLifeTimeParams.MaxPodLifeTimeSeconds,
			States:                states,
		}
		if err := podlifetime.ValidatePodLifeTimeArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", podlifetime.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", podlifetime.PluginName, err)
		}
		return &api.PluginConfig{
			Name: podlifetime.PluginName,
			Args: args,
		}, nil
	},
	"RemoveDuplicates": func(params *StrategyParameters) (*api.PluginConfig, error) {
		args := &removeduplicates.RemoveDuplicatesArgs{
			Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
		}
		if params.RemoveDuplicates != nil {
			args.ExcludeOwnerKinds = params.RemoveDuplicates.ExcludeOwnerKinds
		}
		if err := removeduplicates.ValidateRemoveDuplicatesArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removeduplicates.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removeduplicates.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removeduplicates.PluginName,
			Args: args,
		}, nil
	},
	"RemovePodsViolatingTopologySpreadConstraint": func(params *StrategyParameters) (*api.PluginConfig, error) {
		constraints := []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule}
		if params.IncludeSoftConstraints {
			constraints = append(constraints, v1.ScheduleAnyway)
		}
		args := &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
			Namespaces:             v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:          params.LabelSelector,
			Constraints:            constraints,
			TopologyBalanceNodeFit: utilpointer.Bool(true),
		}
		if err := removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingtopologyspreadconstraint.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removepodsviolatingtopologyspreadconstraint.PluginName,
|
||||
Args: args,
|
||||
}, nil
|
||||
},
|
||||
"HighNodeUtilization": func(params *StrategyParameters) (*api.PluginConfig, error) {
|
||||
if params.NodeResourceUtilizationThresholds == nil {
|
||||
params.NodeResourceUtilizationThresholds = &NodeResourceUtilizationThresholds{}
|
||||
}
|
||||
args := &nodeutilization.HighNodeUtilizationArgs{
|
||||
EvictableNamespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||
Thresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.Thresholds),
|
||||
NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
|
||||
}
|
||||
if err := nodeutilization.ValidateHighNodeUtilizationArgs(args); err != nil {
|
||||
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
|
||||
return nil, fmt.Errorf("strategy %q param validation failed: %v", nodeutilization.HighNodeUtilizationPluginName, err)
|
||||
}
|
||||
return &api.PluginConfig{
|
||||
Name: nodeutilization.HighNodeUtilizationPluginName,
|
||||
Args: args,
|
||||
}, nil
|
||||
},
|
||||
"LowNodeUtilization": func(params *StrategyParameters) (*api.PluginConfig, error) {
|
||||
if params.NodeResourceUtilizationThresholds == nil {
|
||||
params.NodeResourceUtilizationThresholds = &NodeResourceUtilizationThresholds{}
|
||||
}
|
||||
args := &nodeutilization.LowNodeUtilizationArgs{
|
||||
EvictableNamespaces: v1alpha1NamespacesToInternal(params.Namespaces),
|
||||
Thresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.Thresholds),
|
||||
TargetThresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.TargetThresholds),
|
||||
UseDeviationThresholds: params.NodeResourceUtilizationThresholds.UseDeviationThresholds,
|
||||
NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
|
||||
}
|
||||
|
||||
if err := nodeutilization.ValidateLowNodeUtilizationArgs(args); err != nil {
|
||||
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
|
||||
return nil, fmt.Errorf("strategy %q param validation failed: %v", nodeutilization.LowNodeUtilizationPluginName, err)
|
||||
}
|
||||
return &api.PluginConfig{
|
||||
Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||
Args: args,
|
||||
}, nil
|
||||
},
|
||||
}
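
The tests later in this diff exercise the map above by strategy name. A minimal usage sketch (the "PodLifeTime" parameters below are illustrative, not taken from this changeset):

	// Illustrative values only; any enabled v1alpha1 strategy is resolved the same way.
	params := &StrategyParameters{
		PodLifeTime: &PodLifeTime{MaxPodLifeTimeSeconds: utilpointer.Uint(86400)},
	}
	if pcFnc, exists := StrategyParamsToPluginArgs["PodLifeTime"]; exists {
		pluginConfig, err := pcFnc(params)
		if err != nil {
			// the legacy params failed the plugin's own validation
		}
		_ = pluginConfig // ready to be wired into a v1alpha2 profile
	}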

func v1alpha1NamespacesToInternal(namespaces *Namespaces) *api.Namespaces {
	internal := &api.Namespaces{}
	if namespaces != nil {
		if namespaces.Exclude != nil {
			internal.Exclude = namespaces.Exclude
		}
		if namespaces.Include != nil {
			internal.Include = namespaces.Include
		}
	} else {
		internal = nil
	}
	return internal
}

func v1alpha1ThresholdToInternal(thresholds ResourceThresholds) api.ResourceThresholds {
	internal := make(api.ResourceThresholds, len(thresholds))
	for k, v := range thresholds {
		internal[k] = api.Percentage(float64(v))
	}
	return internal
}
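
As a quick, made-up illustration of the two helpers: a nil v1alpha1 namespaces pointer converts to nil rather than an empty struct, and each threshold entry is copied into the internal percentage type.

	// Illustrative values only.
	var ns *Namespaces
	fmt.Println(v1alpha1NamespacesToInternal(ns) == nil) // true

	in := ResourceThresholds{"cpu": Percentage(20)}
	fmt.Println(v1alpha1ThresholdToInternal(in)["cpu"]) // 20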

@@ -1,859 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"fmt"
	"testing"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/api/core/v1"
	utilpointer "k8s.io/utils/pointer"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)

func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeTaints(t *testing.T) {
	strategyName := "RemovePodsViolatingNodeTaints"
	type testCase struct {
		description string
		params      *StrategyParameters
		err         error
		result      *api.PluginConfig
	}
	testCases := []testCase{
		{
			description: "wire in all valid parameters",
			params: &StrategyParameters{
				ExcludedTaints: []string{
					"dedicated=special-user",
					"reserved",
				},
				ThresholdPriority: utilpointer.Int32(100),
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
				},
			},
			err: nil,
			result: &api.PluginConfig{
				Name: removepodsviolatingnodetaints.PluginName,
				Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
					Namespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
					ExcludedTaints: []string{"dedicated=special-user", "reserved"},
				},
			},
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
					Include: []string{"test2"},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
			result: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var result *api.PluginConfig
			var err error
			if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
				result, err = pcFnc(tc.params)
			}
			if err != nil {
				if tc.err == nil || err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// compare results for deep equality; cmp.Diff reports any mismatch
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

func TestStrategyParamsToPluginArgsRemoveFailedPods(t *testing.T) {
	strategyName := "RemoveFailedPods"
	type testCase struct {
		description string
		params      *StrategyParameters
		err         error
		result      *api.PluginConfig
	}
	testCases := []testCase{
		{
			description: "wire in all valid parameters",
			params: &StrategyParameters{
				FailedPods: &FailedPods{
					MinPodLifetimeSeconds:   utilpointer.Uint(3600),
					ExcludeOwnerKinds:       []string{"Job"},
					Reasons:                 []string{"NodeAffinity"},
					IncludingInitContainers: true,
				},
				ThresholdPriority: utilpointer.Int32(100),
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
				},
			},
			err: nil,
			result: &api.PluginConfig{
				Name: removefailedpods.PluginName,
				Args: &removefailedpods.RemoveFailedPodsArgs{
					ExcludeOwnerKinds:       []string{"Job"},
					MinPodLifetimeSeconds:   utilpointer.Uint(3600),
					Reasons:                 []string{"NodeAffinity"},
					IncludingInitContainers: true,
					Namespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
				},
			},
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
					Include: []string{"test2"},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
			result: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var result *api.PluginConfig
			var err error
			if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
				result, err = pcFnc(tc.params)
			}
			if err != nil {
				if tc.err == nil || err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// compare results for deep equality; cmp.Diff reports any mismatch
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeAffinity(t *testing.T) {
	strategyName := "RemovePodsViolatingNodeAffinity"
	type testCase struct {
		description string
		params      *StrategyParameters
		err         error
		result      *api.PluginConfig
	}
	testCases := []testCase{
		{
			description: "wire in all valid parameters",
			params: &StrategyParameters{
				NodeAffinityType:  []string{"requiredDuringSchedulingIgnoredDuringExecution"},
				ThresholdPriority: utilpointer.Int32(100),
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
				},
			},
			err: nil,
			result: &api.PluginConfig{
				Name: removepodsviolatingnodeaffinity.PluginName,
				Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
					NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
					Namespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
				},
			},
		},
		{
			description: "invalid params, not setting nodeaffinity type",
			params:      &StrategyParameters{},
			err:         fmt.Errorf("strategy \"%s\" param validation failed: nodeAffinityType needs to be set", strategyName),
			result:      nil,
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{
				NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
					Include: []string{"test2"},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
			result: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var result *api.PluginConfig
			var err error
			if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
				result, err = pcFnc(tc.params)
			}
			if err != nil {
				if tc.err == nil || err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// compare results for deep equality; cmp.Diff reports any mismatch
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

func TestStrategyParamsToPluginArgsRemovePodsViolatingInterPodAntiAffinity(t *testing.T) {
	strategyName := "RemovePodsViolatingInterPodAntiAffinity"
	type testCase struct {
		description string
		params      *StrategyParameters
		err         error
		result      *api.PluginConfig
	}
	testCases := []testCase{
		{
			description: "wire in all valid parameters",
			params: &StrategyParameters{
				ThresholdPriority: utilpointer.Int32(100),
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
				},
			},
			err: nil,
			result: &api.PluginConfig{
				Name: removepodsviolatinginterpodantiaffinity.PluginName,
				Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{
					Namespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
				},
			},
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
					Include: []string{"test2"},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
			result: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var result *api.PluginConfig
			var err error
			if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
				result, err = pcFnc(tc.params)
			}
			if err != nil {
				if tc.err == nil || err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// compare results for deep equality; cmp.Diff reports any mismatch
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

func TestStrategyParamsToPluginArgsRemovePodsHavingTooManyRestarts(t *testing.T) {
	strategyName := "RemovePodsHavingTooManyRestarts"
	type testCase struct {
		description string
		params      *StrategyParameters
		err         error
		result      *api.PluginConfig
	}
	testCases := []testCase{
		{
			description: "wire in all valid parameters",
			params: &StrategyParameters{
				PodsHavingTooManyRestarts: &PodsHavingTooManyRestarts{
					PodRestartThreshold:     100,
					IncludingInitContainers: true,
				},
				ThresholdPriority: utilpointer.Int32(100),
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
				},
			},
			err: nil,
			result: &api.PluginConfig{
				Name: removepodshavingtoomanyrestarts.PluginName,
				Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
					PodRestartThreshold:     100,
					IncludingInitContainers: true,
					Namespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
				},
			},
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
					Include: []string{"test2"},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
			result: nil,
		},
		{
			description: "invalid params restart threshold",
			params: &StrategyParameters{
				PodsHavingTooManyRestarts: &PodsHavingTooManyRestarts{
					PodRestartThreshold: 0,
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: invalid PodsHavingTooManyRestarts threshold", strategyName),
			result: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var result *api.PluginConfig
			var err error
			if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
				result, err = pcFnc(tc.params)
			}
			if err != nil {
				if tc.err == nil || err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// compare results for deep equality; cmp.Diff reports any mismatch
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

func TestStrategyParamsToPluginArgsPodLifeTime(t *testing.T) {
	strategyName := "PodLifeTime"
	type testCase struct {
		description string
		params      *StrategyParameters
		err         error
		result      *api.PluginConfig
	}
	testCases := []testCase{
		{
			description: "wire in all valid parameters",
			params: &StrategyParameters{
				PodLifeTime: &PodLifeTime{
					MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
					States: []string{
						"Pending",
						"PodInitializing",
					},
				},
				ThresholdPriority: utilpointer.Int32(100),
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
				},
			},
			err: nil,
			result: &api.PluginConfig{
				Name: podlifetime.PluginName,
				Args: &podlifetime.PodLifeTimeArgs{
					MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
					States: []string{
						"Pending",
						"PodInitializing",
					},
					Namespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
				},
			},
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{
				PodLifeTime: &PodLifeTime{
					MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
				},
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
					Include: []string{"test2"},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
			result: nil,
		},
		{
			description: "invalid params MaxPodLifeTimeSeconds not set",
			params: &StrategyParameters{
				PodLifeTime: &PodLifeTime{},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: MaxPodLifeTimeSeconds not set", strategyName),
			result: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var result *api.PluginConfig
			var err error
			if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
				result, err = pcFnc(tc.params)
			}
			if err != nil {
				if tc.err == nil || err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// compare results for deep equality; cmp.Diff reports any mismatch
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

func TestStrategyParamsToPluginArgsRemoveDuplicates(t *testing.T) {
	strategyName := "RemoveDuplicates"
	type testCase struct {
		description string
		params      *StrategyParameters
		err         error
		result      *api.PluginConfig
	}
	testCases := []testCase{
		{
			description: "wire in all valid parameters",
			params: &StrategyParameters{
				RemoveDuplicates: &RemoveDuplicates{
					ExcludeOwnerKinds: []string{"ReplicaSet"},
				},
				ThresholdPriority: utilpointer.Int32(100),
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
				},
			},
			err: nil,
			result: &api.PluginConfig{
				Name: removeduplicates.PluginName,
				Args: &removeduplicates.RemoveDuplicatesArgs{
					ExcludeOwnerKinds: []string{"ReplicaSet"},
					Namespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
				},
			},
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{
				PodLifeTime: &PodLifeTime{
					MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
				},
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
					Include: []string{"test2"},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
			result: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var result *api.PluginConfig
			var err error
			if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
				result, err = pcFnc(tc.params)
			}
			if err != nil {
				if tc.err == nil || err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// compare results for deep equality; cmp.Diff reports any mismatch
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t *testing.T) {
	strategyName := "RemovePodsViolatingTopologySpreadConstraint"
	type testCase struct {
		description string
		params      *StrategyParameters
		err         error
		result      *api.PluginConfig
	}
	testCases := []testCase{
		{
			description: "wire in all valid parameters",
			params: &StrategyParameters{
				IncludeSoftConstraints: true,
				ThresholdPriority:      utilpointer.Int32(100),
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
				},
			},
			err: nil,
			result: &api.PluginConfig{
				Name: removepodsviolatingtopologyspreadconstraint.PluginName,
				Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
					Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
					TopologyBalanceNodeFit: utilpointer.Bool(true),
					Namespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
				},
			},
		},
		{
			description: "params without soft constraints",
			params: &StrategyParameters{
				IncludeSoftConstraints: false,
			},
			err: nil,
			result: &api.PluginConfig{
				Name: removepodsviolatingtopologyspreadconstraint.PluginName,
				Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
					Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
					TopologyBalanceNodeFit: utilpointer.Bool(true),
				},
			},
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
					Include: []string{"test2"},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
			result: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var result *api.PluginConfig
			var err error
			if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
				result, err = pcFnc(tc.params)
			}
			if err != nil {
				if tc.err == nil || err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// compare results for deep equality; cmp.Diff reports any mismatch
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

func TestStrategyParamsToPluginArgsHighNodeUtilization(t *testing.T) {
	strategyName := "HighNodeUtilization"
	type testCase struct {
		description string
		params      *StrategyParameters
		err         error
		result      *api.PluginConfig
	}
	testCases := []testCase{
		{
			description: "wire in all valid parameters",
			params: &StrategyParameters{
				NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
					NumberOfNodes: 3,
					Thresholds: ResourceThresholds{
						"cpu":    Percentage(20),
						"memory": Percentage(20),
						"pods":   Percentage(20),
					},
				},
				ThresholdPriority: utilpointer.Int32(100),
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
				},
			},
			err: nil,
			result: &api.PluginConfig{
				Name: nodeutilization.HighNodeUtilizationPluginName,
				Args: &nodeutilization.HighNodeUtilizationArgs{
					Thresholds: api.ResourceThresholds{
						"cpu":    api.Percentage(20),
						"memory": api.Percentage(20),
						"pods":   api.Percentage(20),
					},
					NumberOfNodes: 3,
					EvictableNamespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
				},
			},
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{
				NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
					NumberOfNodes: 3,
					Thresholds: ResourceThresholds{
						"cpu":    Percentage(20),
						"memory": Percentage(20),
						"pods":   Percentage(20),
					},
				},
				Namespaces: &Namespaces{
					Include: []string{"test2"},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: only Exclude namespaces can be set, inclusion is not supported", strategyName),
			result: nil,
		},
		{
			description: "invalid params nil ResourceThresholds",
			params: &StrategyParameters{
				NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
					NumberOfNodes: 3,
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: no resource threshold is configured", strategyName),
			result: nil,
		},
		{
			description: "invalid params out of bounds threshold",
			params: &StrategyParameters{
				NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
					NumberOfNodes: 3,
					Thresholds: ResourceThresholds{
						"cpu": Percentage(150),
					},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: cpu threshold not in [0, 100] range", strategyName),
			result: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var result *api.PluginConfig
			var err error
			if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
				result, err = pcFnc(tc.params)
			}
			if err != nil {
				if tc.err == nil || err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// compare results for deep equality; cmp.Diff reports any mismatch
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

func TestStrategyParamsToPluginArgsLowNodeUtilization(t *testing.T) {
	strategyName := "LowNodeUtilization"
	type testCase struct {
		description string
		params      *StrategyParameters
		err         error
		result      *api.PluginConfig
	}
	testCases := []testCase{
		{
			description: "wire in all valid parameters",
			params: &StrategyParameters{
				NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
					NumberOfNodes: 3,
					Thresholds: ResourceThresholds{
						"cpu":    Percentage(20),
						"memory": Percentage(20),
						"pods":   Percentage(20),
					},
					TargetThresholds: ResourceThresholds{
						"cpu":    Percentage(50),
						"memory": Percentage(50),
						"pods":   Percentage(50),
					},
					UseDeviationThresholds: true,
				},
				ThresholdPriority: utilpointer.Int32(100),
				Namespaces: &Namespaces{
					Exclude: []string{"test1"},
				},
			},
			err: nil,
			result: &api.PluginConfig{
				Name: nodeutilization.LowNodeUtilizationPluginName,
				Args: &nodeutilization.LowNodeUtilizationArgs{
					Thresholds: api.ResourceThresholds{
						"cpu":    api.Percentage(20),
						"memory": api.Percentage(20),
						"pods":   api.Percentage(20),
					},
					TargetThresholds: api.ResourceThresholds{
						"cpu":    api.Percentage(50),
						"memory": api.Percentage(50),
						"pods":   api.Percentage(50),
					},
					UseDeviationThresholds: true,
					NumberOfNodes:          3,
					EvictableNamespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
				},
			},
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{
				NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
					NumberOfNodes: 3,
					Thresholds: ResourceThresholds{
						"cpu":    Percentage(20),
						"memory": Percentage(20),
						"pods":   Percentage(20),
					},
					TargetThresholds: ResourceThresholds{
						"cpu":    Percentage(50),
						"memory": Percentage(50),
						"pods":   Percentage(50),
					},
					UseDeviationThresholds: true,
				},
				Namespaces: &Namespaces{
					Include: []string{"test2"},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: only Exclude namespaces can be set, inclusion is not supported", strategyName),
			result: nil,
		},
		{
			description: "invalid params nil ResourceThresholds",
			params: &StrategyParameters{
				NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
					NumberOfNodes: 3,
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: thresholds config is not valid: no resource threshold is configured", strategyName),
			result: nil,
		},
		{
			description: "invalid params out of bounds threshold",
			params: &StrategyParameters{
				NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
					NumberOfNodes: 3,
					Thresholds: ResourceThresholds{
						"cpu": Percentage(150),
					},
				},
			},
			err:    fmt.Errorf("strategy \"%s\" param validation failed: thresholds config is not valid: cpu threshold not in [0, 100] range", strategyName),
			result: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var result *api.PluginConfig
			var err error
			if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
				result, err = pcFnc(tc.params)
			}
			if err != nil {
				if tc.err == nil || err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// compare results for deep equality; cmp.Diff reports any mismatch
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
}

@@ -1,129 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

type DeschedulerPolicy struct {
	metav1.TypeMeta `json:",inline"`

	// Strategies
	Strategies StrategyList `json:"strategies,omitempty"`

	// NodeSelector for a set of nodes to operate over
	NodeSelector *string `json:"nodeSelector,omitempty"`

	// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
	EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`

	// EvictLocalStoragePods allows pods using local storage to be evicted.
	EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`

	// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
	EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`

	// IgnorePVCPods prevents pods with PVCs from being evicted.
	IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`

	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
	MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`

	// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
	MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}

type (
	StrategyName string
	StrategyList map[StrategyName]DeschedulerStrategy
)

type DeschedulerStrategy struct {
	// Enabled or disabled
	Enabled bool `json:"enabled,omitempty"`

	// Weight
	Weight int `json:"weight,omitempty"`

	// Strategy parameters
	Params *StrategyParameters `json:"params,omitempty"`
}

// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable.
type Namespaces struct {
	Include []string `json:"include"`
	Exclude []string `json:"exclude"`
}

// Besides Namespaces, ThresholdPriority, and ThresholdPriorityClassName, only one of its members may be specified.
type StrategyParameters struct {
	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
	NodeAffinityType                  []string                           `json:"nodeAffinityType,omitempty"`
	PodsHavingTooManyRestarts         *PodsHavingTooManyRestarts         `json:"podsHavingTooManyRestarts,omitempty"`
	PodLifeTime                       *PodLifeTime                       `json:"podLifeTime,omitempty"`
	RemoveDuplicates                  *RemoveDuplicates                  `json:"removeDuplicates,omitempty"`
	FailedPods                        *FailedPods                        `json:"failedPods,omitempty"`
	IncludeSoftConstraints            bool                               `json:"includeSoftConstraints"`
	Namespaces                        *Namespaces                        `json:"namespaces"`
	ThresholdPriority                 *int32                             `json:"thresholdPriority"`
	ThresholdPriorityClassName        string                             `json:"thresholdPriorityClassName"`
	LabelSelector                     *metav1.LabelSelector              `json:"labelSelector"`
	NodeFit                           bool                               `json:"nodeFit"`
	IncludePreferNoSchedule           bool                               `json:"includePreferNoSchedule"`
	ExcludedTaints                    []string                           `json:"excludedTaints,omitempty"`
}

type (
	Percentage         float64
	ResourceThresholds map[v1.ResourceName]Percentage
)

type NodeResourceUtilizationThresholds struct {
	UseDeviationThresholds bool               `json:"useDeviationThresholds,omitempty"`
	Thresholds             ResourceThresholds `json:"thresholds,omitempty"`
	TargetThresholds       ResourceThresholds `json:"targetThresholds,omitempty"`
	NumberOfNodes          int                `json:"numberOfNodes,omitempty"`
}

type PodsHavingTooManyRestarts struct {
	PodRestartThreshold     int32 `json:"podRestartThreshold,omitempty"`
	IncludingInitContainers bool  `json:"includingInitContainers,omitempty"`
}

type RemoveDuplicates struct {
	ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
}

type PodLifeTime struct {
	MaxPodLifeTimeSeconds *uint    `json:"maxPodLifeTimeSeconds,omitempty"`
	States                []string `json:"states,omitempty"`

	// Deprecated: Use States instead.
	PodStatusPhases []string `json:"podStatusPhases,omitempty"`
}
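
Note that the registry's "PodLifeTime" entry earlier in this diff appends the deprecated PodStatusPhases and the newer States into one list, so a hypothetical legacy config like the following still converts:

	// Hypothetical values for illustration only.
	legacy := &PodLifeTime{
		MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
		PodStatusPhases:       []string{"Pending"},
		States:                []string{"PodInitializing"},
	}
	// after conversion, States == []string{"Pending", "PodInitializing"}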

type FailedPods struct {
	ExcludeOwnerKinds       []string `json:"excludeOwnerKinds,omitempty"`
	MinPodLifetimeSeconds   *uint    `json:"minPodLifetimeSeconds,omitempty"`
	Reasons                 []string `json:"reasons,omitempty"`
	IncludingInitContainers bool     `json:"includingInitContainers,omitempty"`
}
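
Putting these types together, a hypothetical v1alpha1 policy fragment for a single strategy (values invented for illustration) would look like:

	// Hypothetical policy fragment; not part of this changeset.
	policy := &DeschedulerPolicy{
		Strategies: StrategyList{
			"RemoveFailedPods": DeschedulerStrategy{
				Enabled: true,
				Params: &StrategyParameters{
					FailedPods: &FailedPods{IncludingInitContainers: true},
				},
			},
		},
	}
	// each enabled strategy's Params feed the StrategyParamsToPluginArgs registry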

pkg/api/v1alpha1/zz_generated.deepcopy.go (generated, 380 lines)
@@ -1,380 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package v1alpha1

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Strategies != nil {
		in, out := &in.Strategies, &out.Strategies
		*out = make(StrategyList, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = new(string)
		**out = **in
	}
	if in.EvictFailedBarePods != nil {
		in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
		*out = new(bool)
		**out = **in
	}
	if in.EvictLocalStoragePods != nil {
		in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
		*out = new(bool)
		**out = **in
	}
	if in.EvictSystemCriticalPods != nil {
		in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
		*out = new(bool)
		**out = **in
	}
	if in.IgnorePVCPods != nil {
		in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
		*out = new(bool)
		**out = **in
	}
	if in.MaxNoOfPodsToEvictPerNode != nil {
		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
		*out = new(uint)
		**out = **in
	}
	if in.MaxNoOfPodsToEvictPerNamespace != nil {
		in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
		*out = new(uint)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
	if in == nil {
		return nil
	}
	out := new(DeschedulerPolicy)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
	*out = *in
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = new(StrategyParameters)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
	if in == nil {
		return nil
	}
	out := new(DeschedulerStrategy)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
	*out = *in
	if in.ExcludeOwnerKinds != nil {
		in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.MinPodLifetimeSeconds != nil {
		in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
		*out = new(uint)
		**out = **in
	}
	if in.Reasons != nil {
		in, out := &in.Reasons, &out.Reasons
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
	if in == nil {
		return nil
	}
	out := new(FailedPods)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
	*out = *in
	if in.Include != nil {
		in, out := &in.Include, &out.Include
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Exclude != nil {
		in, out := &in.Exclude, &out.Exclude
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
func (in *Namespaces) DeepCopy() *Namespaces {
	if in == nil {
		return nil
	}
	out := new(Namespaces)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
	*out = *in
	if in.Thresholds != nil {
		in, out := &in.Thresholds, &out.Thresholds
		*out = make(ResourceThresholds, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.TargetThresholds != nil {
		in, out := &in.TargetThresholds, &out.TargetThresholds
		*out = make(ResourceThresholds, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
	if in == nil {
		return nil
	}
	out := new(NodeResourceUtilizationThresholds)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
	*out = *in
	if in.MaxPodLifeTimeSeconds != nil {
		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
		*out = new(uint)
		**out = **in
	}
	if in.States != nil {
		in, out := &in.States, &out.States
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.PodStatusPhases != nil {
		in, out := &in.PodStatusPhases, &out.PodStatusPhases
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
	if in == nil {
		return nil
	}
	out := new(PodLifeTime)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
	if in == nil {
		return nil
	}
	out := new(PodsHavingTooManyRestarts)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
	*out = *in
	if in.ExcludeOwnerKinds != nil {
		in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
	if in == nil {
		return nil
	}
	out := new(RemoveDuplicates)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
	{
		in := &in
		*out = make(ResourceThresholds, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
	if in == nil {
		return nil
	}
	out := new(ResourceThresholds)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in StrategyList) DeepCopyInto(out *StrategyList) {
	{
		in := &in
		*out = make(StrategyList, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
func (in StrategyList) DeepCopy() StrategyList {
	if in == nil {
		return nil
	}
	out := new(StrategyList)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
	*out = *in
	if in.NodeResourceUtilizationThresholds != nil {
		in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
		*out = new(NodeResourceUtilizationThresholds)
		(*in).DeepCopyInto(*out)
	}
	if in.NodeAffinityType != nil {
		in, out := &in.NodeAffinityType, &out.NodeAffinityType
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.PodsHavingTooManyRestarts != nil {
		in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
		*out = new(PodsHavingTooManyRestarts)
		**out = **in
	}
	if in.PodLifeTime != nil {
		in, out := &in.PodLifeTime, &out.PodLifeTime
		*out = new(PodLifeTime)
		(*in).DeepCopyInto(*out)
	}
	if in.RemoveDuplicates != nil {
		in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
		*out = new(RemoveDuplicates)
		(*in).DeepCopyInto(*out)
	}
	if in.FailedPods != nil {
		in, out := &in.FailedPods, &out.FailedPods
		*out = new(FailedPods)
		(*in).DeepCopyInto(*out)
	}
	if in.Namespaces != nil {
		in, out := &in.Namespaces, &out.Namespaces
		*out = new(Namespaces)
		(*in).DeepCopyInto(*out)
	}
	if in.ThresholdPriority != nil {
		in, out := &in.ThresholdPriority, &out.ThresholdPriority
		*out = new(int32)
		**out = **in
	}
	if in.LabelSelector != nil {
		in, out := &in.LabelSelector, &out.LabelSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.ExcludedTaints != nil {
		in, out := &in.ExcludedTaints, &out.ExcludedTaints
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
	if in == nil {
		return nil
	}
	out := new(StrategyParameters)
	in.DeepCopyInto(out)
	return out
}
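
A small illustrative consequence of the generated code above (snippet not part of the diff): the map-typed aliases use value receivers, so DeepCopy returns a fully independent map.

	// Illustrative values only.
	orig := StrategyList{"PodLifeTime": DeschedulerStrategy{Enabled: true}}
	clone := orig.DeepCopy()
	clone["PodLifeTime"] = DeschedulerStrategy{Enabled: false}
	// orig still maps "PodLifeTime" to an enabled strategy; the two maps share no storage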

@@ -37,6 +37,16 @@ type DeschedulerPolicy struct {

	// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
	MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`

	// MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
	MaxNoOfPodsToEvictTotal *uint `json:"maxNoOfPodsToEvictTotal,omitempty"`

	// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
	// Default is false.
	EvictionFailureEventNotification *bool

	// MetricsCollector configures collection of metrics for actual resource utilization
	MetricsCollector MetricsCollector `json:"metricsCollector,omitempty"`
}

type DeschedulerProfile struct {
@@ -63,3 +73,10 @@ type PluginSet struct {
	Enabled  []string `json:"enabled"`
	Disabled []string `json:"disabled"`
}

// MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct {
	// Enabled metrics collection from kubernetes metrics.
	// Later, the collection can be extended to other providers.
	Enabled bool `json:"enabled,omitempty"`
}
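
A minimal sketch of a v1alpha2 policy opting into the new fields (all values and the utilpointer import are assumptions for illustration):

	// Illustrative values only; utilpointer = k8s.io/utils/pointer (assumed import).
	policy := &DeschedulerPolicy{
		MaxNoOfPodsToEvictTotal:          utilpointer.Uint(50),
		EvictionFailureEventNotification: utilpointer.Bool(true),
		MetricsCollector:                 MetricsCollector{Enabled: true},
	}
	// the generated conversion below round-trips these fields into the internal api package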

pkg/api/v1alpha2/zz_generated.conversion.go (generated, 42 lines)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -46,6 +46,16 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddGeneratedConversionFunc((*MetricsCollector)(nil), (*api.MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(a.(*MetricsCollector), b.(*api.MetricsCollector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.MetricsCollector)(nil), (*MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(a.(*api.MetricsCollector), b.(*MetricsCollector), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddGeneratedConversionFunc((*api.PluginConfig)(nil), (*PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_api_PluginConfig_To_v1alpha2_PluginConfig(a.(*api.PluginConfig), b.(*PluginConfig), scope)
 	}); err != nil {
@@ -104,6 +114,11 @@ func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
 	out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
 	out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
 	out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
+	out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
+	out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
+	if err := Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(&in.MetricsCollector, &out.MetricsCollector, s); err != nil {
+		return err
+	}
 	return nil
 }

@@ -122,6 +137,11 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.Des
 	out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
 	out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
 	out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
+	out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
+	out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
+	if err := Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(&in.MetricsCollector, &out.MetricsCollector, s); err != nil {
+		return err
+	}
 	return nil
 }

@@ -173,6 +193,26 @@ func Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.Desch
 	return autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in, out, s)
 }

+func autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
+	out.Enabled = in.Enabled
+	return nil
+}
+
+// Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector is an autogenerated conversion function.
+func Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
+	return autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in, out, s)
+}
+
+func autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
+	out.Enabled = in.Enabled
+	return nil
+}
+
+// Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector is an autogenerated conversion function.
+func Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
+	return autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in, out, s)
+}
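A hedged sketch of wiring the generated conversions into a scheme and round-tripping the new field (flow is illustrative; real callers typically go through scheme.Convert):

// Hedged sketch: register the generated conversions, then convert the new
// MetricsCollector field between the v1alpha2 and internal types.
scheme := runtime.NewScheme()
if err := RegisterConversions(scheme); err != nil {
	panic(err)
}
in := MetricsCollector{Enabled: true}
out := api.MetricsCollector{}
// The generated helper above ignores its conversion.Scope argument, so nil
// is acceptable in this sketch.
if err := Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(&in, &out, nil); err != nil {
	panic(err)
}
// out.Enabled is now true.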

func autoConvert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
	out.Name = in.Name
	if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
pkg/api/v1alpha2/zz_generated.deepcopy.go (generated, 29 changes)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -51,6 +51,17 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 		*out = new(uint)
 		**out = **in
 	}
+	if in.MaxNoOfPodsToEvictTotal != nil {
+		in, out := &in.MaxNoOfPodsToEvictTotal, &out.MaxNoOfPodsToEvictTotal
+		*out = new(uint)
+		**out = **in
+	}
+	if in.EvictionFailureEventNotification != nil {
+		in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
+		*out = new(bool)
+		**out = **in
+	}
+	out.MetricsCollector = in.MetricsCollector
 	return
 }

@@ -96,6 +107,22 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
+func (in *MetricsCollector) DeepCopy() *MetricsCollector {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricsCollector)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
 	*out = *in
pkg/api/v1alpha2/zz_generated.defaults.go (generated, 2 changes)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
pkg/api/zz_generated.deepcopy.go (generated, 29 changes)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -51,6 +51,17 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 		*out = new(uint)
 		**out = **in
 	}
+	if in.MaxNoOfPodsToEvictTotal != nil {
+		in, out := &in.MaxNoOfPodsToEvictTotal, &out.MaxNoOfPodsToEvictTotal
+		*out = new(uint)
+		**out = **in
+	}
+	if in.EvictionFailureEventNotification != nil {
+		in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
+		*out = new(bool)
+		**out = **in
+	}
+	out.MetricsCollector = in.MetricsCollector
 	return
 }

@@ -96,6 +107,22 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
+func (in *MetricsCollector) DeepCopy() *MetricsCollector {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricsCollector)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Namespaces) DeepCopyInto(out *Namespaces) {
 	*out = *in
@@ -51,6 +51,9 @@ type DeschedulerConfiguration struct {
 	// EvictLocalStoragePods allows pods using local storage to be evicted.
 	EvictLocalStoragePods bool

+	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
+	EvictDaemonSetPods bool
+
 	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted.
 	IgnorePVCPods bool

@@ -51,6 +51,9 @@ type DeschedulerConfiguration struct {
 	// EvictLocalStoragePods allows pods using local storage to be evicted.
 	EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`

+	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
+	EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
+
 	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted.
 	IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
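A hedged sketch of the new component-config knob (package alias and values are illustrative):

// Hedged sketch: enabling the new toggle alongside the existing ones.
cfg := componentconfig.DeschedulerConfiguration{
	EvictLocalStoragePods: true,
	EvictDaemonSetPods:    true, // new in this change: DaemonSet-owned pods become evictable
	IgnorePVCPods:         false,
}
_ = cfg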
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -67,6 +67,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
 	out.NodeSelector = in.NodeSelector
 	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
 	out.EvictLocalStoragePods = in.EvictLocalStoragePods
+	out.EvictDaemonSetPods = in.EvictDaemonSetPods
 	out.IgnorePVCPods = in.IgnorePVCPods
 	if err := Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
 		return err
@@ -89,6 +90,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
 	out.NodeSelector = in.NodeSelector
 	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
 	out.EvictLocalStoragePods = in.EvictLocalStoragePods
+	out.EvictDaemonSetPods = in.EvictDaemonSetPods
 	out.IgnorePVCPods = in.IgnorePVCPods
 	if err := Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
 		return err
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -19,16 +19,16 @@ package client
 import (
 	"fmt"

-	clientset "k8s.io/client-go/kubernetes"
-	componentbaseconfig "k8s.io/component-base/config"
-
 	// Ensure all auth plugins are loaded.
+	clientset "k8s.io/client-go/kubernetes"
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
+	componentbaseconfig "k8s.io/component-base/config"
+	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
 )

-func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
+func createConfig(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (*rest.Config, error) {
 	var cfg *rest.Config
 	if len(clientConnection.Kubeconfig) != 0 {
 		master, err := GetMasterFromKubeconfig(clientConnection.Kubeconfig)
@@ -56,9 +56,28 @@ func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfigura
 		cfg = rest.AddUserAgent(cfg, userAgt)
 	}

+	return cfg, nil
+}
+
+func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
+	cfg, err := createConfig(clientConnection, userAgt)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create config: %v", err)
+	}
+
+	return clientset.NewForConfig(cfg)
+}
+
+func CreateMetricsClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (metricsclient.Interface, error) {
+	cfg, err := createConfig(clientConnection, userAgt)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create config: %v", err)
+	}
+
+	// Create the metrics clientset to access the metrics.k8s.io API
+	return metricsclient.NewForConfig(cfg)
+}
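A hedged usage sketch of the split helpers above; the kubeconfig path and user-agent string are illustrative:

// Hedged sketch: both clientsets are now built from the same rest.Config.
func newClients() error {
	clientConnection := componentbaseconfig.ClientConnectionConfiguration{
		Kubeconfig: "/var/lib/descheduler/kubeconfig", // illustrative path
	}
	kubeClient, err := CreateClient(clientConnection, "descheduler")
	if err != nil {
		return err
	}
	// Only needed when the policy enables the metrics collector.
	metricsClient, err := CreateMetricsClient(clientConnection, "descheduler")
	if err != nil {
		return err
	}
	_, _ = kubeClient, metricsClient
	return nil
}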

func GetMasterFromKubeconfig(filename string) (string, error) {
	config, err := clientcmd.LoadFromFile(filename)
	if err != nil {
@@ -67,11 +86,11 @@ func GetMasterFromKubeconfig(filename string) (string, error) {

 	context, ok := config.Contexts[config.CurrentContext]
 	if !ok {
-		return "", fmt.Errorf("failed to get master address from kubeconfig")
+		return "", fmt.Errorf("failed to get master address from kubeconfig: current context not found")
 	}

 	if val, ok := config.Clusters[context.Cluster]; ok {
 		return val.Server, nil
 	}
-	return "", fmt.Errorf("failed to get master address from kubeconfig")
+	return "", fmt.Errorf("failed to get master address from kubeconfig: cluster information not found")
 }
@@ -18,47 +18,48 @@ package descheduler

 import (
 	"context"
 	"errors"
 	"fmt"
 	"math"
 	"strconv"
 	"time"

 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"

 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1"
 	policyv1 "k8s.io/api/policy/v1"
 	schedulingv1 "k8s.io/api/scheduling/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilversion "k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/events"
 	componentbaseconfig "k8s.io/component-base/config"
 	"k8s.io/klog/v2"

-	v1 "k8s.io/api/core/v1"
-	policy "k8s.io/api/policy/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
-	utilversion "k8s.io/apimachinery/pkg/util/version"
-	"k8s.io/apimachinery/pkg/util/wait"
-	clientset "k8s.io/client-go/kubernetes"
-	fakeclientset "k8s.io/client-go/kubernetes/fake"
-	listersv1 "k8s.io/client-go/listers/core/v1"
-	schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
-	core "k8s.io/client-go/testing"
-
-	"sigs.k8s.io/descheduler/pkg/descheduler/client"
-	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
-	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
-	"sigs.k8s.io/descheduler/pkg/tracing"
-	"sigs.k8s.io/descheduler/pkg/utils"
-	"sigs.k8s.io/descheduler/pkg/version"

 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/metrics"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/client"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
 	frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile"
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/tracing"
 	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/pkg/version"
 )
 type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
@@ -69,41 +70,123 @@ type profileRunner struct {
 }

 type descheduler struct {
-	rs                         *options.DeschedulerServer
-	podLister                  listersv1.PodLister
-	nodeLister                 listersv1.NodeLister
-	namespaceLister            listersv1.NamespaceLister
-	priorityClassLister        schedulingv1.PriorityClassLister
-	getPodsAssignedToNode      podutil.GetPodsAssignedToNodeFunc
-	sharedInformerFactory      informers.SharedInformerFactory
-	evictionPolicyGroupVersion string
-	deschedulerPolicy          *api.DeschedulerPolicy
-	eventRecorder              events.EventRecorder
+	rs                     *options.DeschedulerServer
+	ir                     *informerResources
+	getPodsAssignedToNode  podutil.GetPodsAssignedToNodeFunc
+	sharedInformerFactory  informers.SharedInformerFactory
+	deschedulerPolicy      *api.DeschedulerPolicy
+	eventRecorder          events.EventRecorder
+	podEvictor             *evictions.PodEvictor
+	podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
+	metricsCollector       *metricscollector.MetricsCollector
 }

-func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
+type informerResources struct {
+	sharedInformerFactory informers.SharedInformerFactory
+	resourceToInformer    map[schema.GroupVersionResource]informers.GenericInformer
+}
+
+func newInformerResources(sharedInformerFactory informers.SharedInformerFactory) *informerResources {
+	return &informerResources{
+		sharedInformerFactory: sharedInformerFactory,
+		resourceToInformer:    make(map[schema.GroupVersionResource]informers.GenericInformer),
+	}
+}
+
+func (ir *informerResources) Uses(resources ...schema.GroupVersionResource) error {
+	for _, resource := range resources {
+		informer, err := ir.sharedInformerFactory.ForResource(resource)
+		if err != nil {
+			return err
+		}
+
+		ir.resourceToInformer[resource] = informer
+	}
+	return nil
+}
+// CopyTo copies informer subscriptions to the new factory and objects to the fake client so that the backing caches are populated when listers are used.
+func (ir *informerResources) CopyTo(fakeClient *fakeclientset.Clientset, newFactory informers.SharedInformerFactory) error {
+	for resource, informer := range ir.resourceToInformer {
+		_, err := newFactory.ForResource(resource)
+		if err != nil {
+			return fmt.Errorf("error getting resource %s: %w", resource, err)
+		}
+
+		objects, err := informer.Lister().List(labels.Everything())
+		if err != nil {
+			return fmt.Errorf("error listing %s: %w", informer, err)
+		}
+
+		for _, object := range objects {
+			fakeClient.Tracker().Add(object)
+		}
+	}
+	return nil
+}
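The two helpers above form a small pattern for dry runs; a hedged sketch, assuming realFactory is an already-started SharedInformerFactory:

// Hedged sketch: subscribe the live factory to the resources plugins read,
// then mirror the cached objects into a fresh fake client for a dry run.
ir := newInformerResources(realFactory)
if err := ir.Uses(v1.SchemeGroupVersion.WithResource("pods"),
	v1.SchemeGroupVersion.WithResource("nodes")); err != nil {
	return err
}
// ... realFactory.Start/WaitForCacheSync happen elsewhere ...
fakeClient := fakeclientset.NewSimpleClientset()
fakeFactory := informers.NewSharedInformerFactory(fakeClient, 0)
if err := ir.CopyTo(fakeClient, fakeFactory); err != nil {
	return err
}
// fakeFactory listers now serve the same objects without touching the cluster.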

+func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory,
+) (*descheduler, error) {
 	podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
-	podLister := sharedInformerFactory.Core().V1().Pods().Lister()
-	nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
-	namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
-	priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
+
+	ir := newInformerResources(sharedInformerFactory)
+	ir.Uses(v1.SchemeGroupVersion.WithResource("pods"),
+		v1.SchemeGroupVersion.WithResource("nodes"),
+		// Future work could be to let each plugin declare what type of resources it needs; that way dry runs would stay
+		// consistent with the real runs without having to keep the list here in sync.
+		v1.SchemeGroupVersion.WithResource("namespaces"),                  // Used by the defaultevictor plugin
+		schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"),  // Used by the defaultevictor plugin
+		policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin
+	)

 	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
 	if err != nil {
 		return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
 	}

+	podEvictor, err := evictions.NewPodEvictor(
+		ctx,
+		rs.Client,
+		eventRecorder,
+		podInformer,
+		rs.DefaultFeatureGates,
+		evictions.NewOptions().
+			WithPolicyGroupVersion(evictionPolicyGroupVersion).
+			WithMaxPodsToEvictPerNode(deschedulerPolicy.MaxNoOfPodsToEvictPerNode).
+			WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
+			WithMaxPodsToEvictTotal(deschedulerPolicy.MaxNoOfPodsToEvictTotal).
+			WithEvictionFailureEventNotification(deschedulerPolicy.EvictionFailureEventNotification).
+			WithDryRun(rs.DryRun).
+			WithMetricsEnabled(!rs.DisableMetrics),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	var metricsCollector *metricscollector.MetricsCollector
+	if deschedulerPolicy.MetricsCollector.Enabled {
+		nodeSelector := labels.Everything()
+		if deschedulerPolicy.NodeSelector != nil {
+			sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
+			if err != nil {
+				return nil, err
+			}
+			nodeSelector = sel
+		}
+		metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
+	}
+
 	return &descheduler{
-		rs:                         rs,
-		podLister:                  podLister,
-		nodeLister:                 nodeLister,
-		namespaceLister:            namespaceLister,
-		priorityClassLister:        priorityClassLister,
-		getPodsAssignedToNode:      getPodsAssignedToNode,
-		sharedInformerFactory:      sharedInformerFactory,
-		evictionPolicyGroupVersion: evictionPolicyGroupVersion,
-		deschedulerPolicy:          deschedulerPolicy,
-		eventRecorder:              eventRecorder,
+		rs:                     rs,
+		ir:                     ir,
+		getPodsAssignedToNode:  getPodsAssignedToNode,
+		sharedInformerFactory:  sharedInformerFactory,
+		deschedulerPolicy:      deschedulerPolicy,
+		eventRecorder:          eventRecorder,
+		podEvictor:             podEvictor,
+		podEvictionReactionFnc: podEvictionReactionFnc,
+		metricsCollector:       metricsCollector,
 	}, nil
 }

@@ -128,13 +211,17 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
 	if d.rs.DryRun {
 		klog.V(3).Infof("Building a cached client from the cluster for the dry run")
 		// Create a new cache so we start from scratch without any leftovers
-		fakeClient, err := cachedClient(d.rs.Client, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
+		fakeClient := fakeclientset.NewSimpleClientset()
+		// simulate a pod eviction by deleting a pod
+		fakeClient.PrependReactor("create", "pods", d.podEvictionReactionFnc(fakeClient))
+		fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+
+		err := d.ir.CopyTo(fakeClient, fakeSharedInformerFactory)
 		if err != nil {
 			return err
 		}

-		// create a new instance of the shared informer factory from the cached client
-		fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
 		// register the pod informer, otherwise it will not get running
 		d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
 		if err != nil {
@@ -152,21 +239,13 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
 		client = d.rs.Client
 	}

-	klog.V(3).Infof("Building a pod evictor")
-	podEvictor := evictions.NewPodEvictor(
-		client,
-		d.evictionPolicyGroupVersion,
-		d.rs.DryRun,
-		d.deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
-		d.deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
-		nodes,
-		!d.rs.DisableMetrics,
-		d.eventRecorder,
-	)
+	klog.V(3).Infof("Setting up the pod evictor")
+	d.podEvictor.SetClient(client)
+	d.podEvictor.ResetCounters()

-	d.runProfiles(ctx, client, nodes, podEvictor)
+	d.runProfiles(ctx, client, nodes)

-	klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
+	klog.V(1).InfoS("Number of evictions/requests", "totalEvicted", d.podEvictor.TotalEvicted(), "evictionRequests", d.podEvictor.TotalEvictionRequests())

 	return nil
 }

@@ -174,7 +253,7 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
 // runProfiles runs all the deschedule plugins of all profiles and
 // later runs through all balance plugins of all profiles. (All Balance plugins should come after all Deschedule plugins)
 // see https://github.com/kubernetes-sigs/descheduler/issues/979
-func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node) {
 	var span trace.Span
 	ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
 	defer span.End()
@@ -185,8 +264,9 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
 			pluginregistry.PluginRegistry,
 			frameworkprofile.WithClientSet(client),
 			frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
-			frameworkprofile.WithPodEvictor(podEvictor),
+			frameworkprofile.WithPodEvictor(d.podEvictor),
 			frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
+			frameworkprofile.WithMetricsCollector(d.metricsCollector),
 		)
 		if err != nil {
 			klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
@@ -251,6 +331,14 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 		return err
 	}

+	if deschedulerPolicy.MetricsCollector.Enabled {
+		metricsClient, err := client.CreateMetricsClient(clientConnection, "descheduler")
+		if err != nil {
+			return err
+		}
+		rs.MetricsClient = metricsClient
+	}
+
 	runFn := func() error {
 		return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
 	}
@@ -275,46 +363,38 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 	return runFn()
 }

-func validateVersionCompatibility(discovery discovery.DiscoveryInterface, versionInfo version.Info) error {
-	serverVersionInfo, err := discovery.ServerVersion()
+func validateVersionCompatibility(discovery discovery.DiscoveryInterface, deschedulerVersionInfo version.Info) error {
+	kubeServerVersionInfo, err := discovery.ServerVersion()
 	if err != nil {
-		return errors.New("failed to discover Kubernetes server version")
+		return fmt.Errorf("failed to discover Kubernetes server version: %v", err)
 	}

-	serverVersion, err := utilversion.ParseSemantic(serverVersionInfo.String())
+	kubeServerVersion, err := utilversion.ParseSemantic(kubeServerVersionInfo.String())
 	if err != nil {
-		return errors.New("failed to parse Kubernetes server version")
+		return fmt.Errorf("failed to parse Kubernetes server version '%s': %v", kubeServerVersionInfo.String(), err)
 	}

-	deschedulerVersion, err := utilversion.ParseGeneric(versionInfo.GitVersion)
+	deschedulerMinor, err := strconv.ParseFloat(deschedulerVersionInfo.Minor, 64)
 	if err != nil {
-		return errors.New("failed to convert Descheduler minor version to float")
+		return fmt.Errorf("failed to convert Descheduler minor version '%s' to float: %v", deschedulerVersionInfo.Minor, err)
 	}

-	deschedulerMinor := float64(deschedulerVersion.Minor())
-	serverMinor := float64(serverVersion.Minor())
-	if math.Abs(deschedulerMinor-serverMinor) > 3 {
+	kubeServerMinor := float64(kubeServerVersion.Minor())
+	if math.Abs(deschedulerMinor-kubeServerMinor) > 3 {
 		return fmt.Errorf(
-			"descheduler version %v may not be supported on your version of Kubernetes %v."+
+			"descheduler version %s.%s may not be supported on your version of Kubernetes %v."+
 				"See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix",
-			deschedulerVersion.String(),
-			serverVersionInfo.String(),
+			deschedulerVersionInfo.Major,
+			deschedulerVersionInfo.Minor,
+			kubeServerVersionInfo.String(),
 		)
 	}

 	return nil
 }
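A quick worked instance of the skew rule above, with illustrative versions: a descheduler with Minor "26" against Kubernetes 1.29 gives |26-29| = 3 and passes, while 1.30 gives 4 and fails:

// Hedged sketch of the arithmetic in isolation (not a call into the real
// discovery path): minor versions may differ by at most 3.
deschedulerMinor, _ := strconv.ParseFloat("26", 64)
for _, kubeMinor := range []float64{29, 30} {
	fmt.Printf("kube minor %.0f compatible: %v\n", kubeMinor, math.Abs(deschedulerMinor-kubeMinor) <= 3)
}
// Output: kube minor 29 compatible: true
//         kube minor 30 compatible: false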

-func cachedClient(
-	realClient clientset.Interface,
-	podLister listersv1.PodLister,
-	nodeLister listersv1.NodeLister,
-	namespaceLister listersv1.NamespaceLister,
-	priorityClassLister schedulingv1.PriorityClassLister,
-) (clientset.Interface, error) {
-	fakeClient := fakeclientset.NewSimpleClientset()
-	// simulate a pod eviction by deleting a pod
-	fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
+func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
+	return func(action core.Action) (bool, runtime.Object, error) {
 		if action.GetSubresource() == "eviction" {
 			createAct, matched := action.(core.CreateActionImpl)
 			if !matched {
@@ -331,62 +411,15 @@ func cachedClient(
 		}
 		// fallback to the default reactor
 		return false, nil, nil
-	})
-
-	klog.V(3).Infof("Pulling resources for the cached client from the cluster")
-	pods, err := podLister.List(labels.Everything())
-	if err != nil {
-		return nil, fmt.Errorf("unable to list pods: %v", err)
-	}
-
-	for _, item := range pods {
-		if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-			return nil, fmt.Errorf("unable to copy pod: %v", err)
-		}
-	}
-
-	nodes, err := nodeLister.List(labels.Everything())
-	if err != nil {
-		return nil, fmt.Errorf("unable to list nodes: %v", err)
-	}
-
-	for _, item := range nodes {
-		if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-			return nil, fmt.Errorf("unable to copy node: %v", err)
-		}
-	}
-
-	namespaces, err := namespaceLister.List(labels.Everything())
-	if err != nil {
-		return nil, fmt.Errorf("unable to list namespaces: %v", err)
-	}
-
-	for _, item := range namespaces {
-		if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-			return nil, fmt.Errorf("unable to copy namespace: %v", err)
-		}
-	}
-
-	priorityClasses, err := priorityClassLister.List(labels.Everything())
-	if err != nil {
-		return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
-	}
-
-	for _, item := range priorityClasses {
-		if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-			return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
-		}
-	}
-
-	return fakeClient, nil
-}

 func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
 	var span trace.Span
 	ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
 	defer span.End()
-	sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
-	nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
+
+	sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))

 	var nodeSelector string
 	if deschedulerPolicy.NodeSelector != nil {
@@ -412,12 +445,28 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer

 	sharedInformerFactory.Start(ctx.Done())
 	sharedInformerFactory.WaitForCacheSync(ctx.Done())
+	descheduler.podEvictor.WaitForEventHandlersSync(ctx)
+
+	if deschedulerPolicy.MetricsCollector.Enabled {
+		go func() {
+			klog.V(2).Infof("Starting metrics collector")
+			descheduler.metricsCollector.Run(ctx)
+			klog.V(2).Infof("Stopped metrics collector")
+		}()
+		klog.V(2).Infof("Waiting for metrics collector to sync")
+		if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(context.Context) (done bool, err error) {
+			return descheduler.metricsCollector.HasSynced(), nil
+		}); err != nil {
+			return fmt.Errorf("unable to wait for metrics collector to sync: %v", err)
+		}
+	}

 	wait.NonSlidingUntil(func() {
 		// A new context is created here intentionally to avoid nesting the spans via context.
 		sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
 		defer sSpan.End()
-		nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, nodeLister, nodeSelector)
+
+		nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.sharedInformerFactory.Core().V1().Nodes().Lister(), nodeSelector)
 		if err != nil {
 			sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
 			klog.Error(err)
@@ -462,3 +511,10 @@ func createClients(clientConnection componentbaseconfig.ClientConnectionConfigur

 	return kClient, eventClient, nil
 }
+
+func trimManagedFields(obj interface{}) (interface{}, error) {
+	if accessor, err := meta.Accessor(obj); err == nil {
+		accessor.SetManagedFields(nil)
+	}
+	return obj, nil
+}
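trimManagedFields is installed as an informer cache transform via the WithTransform option above; a hedged sketch of its effect, with illustrative client and object names:

// Hedged sketch: every object entering the informer cache passes through
// the transform, so managedFields never occupy cache memory.
factory := informers.NewSharedInformerFactoryWithOptions(
	kubeClient, 0, informers.WithTransform(trimManagedFields)) // kubeClient assumed
factory.Start(ctx.Done())
factory.WaitForCacheSync(ctx.Done())
pod, err := factory.Core().V1().Pods().Lister().Pods("default").Get("example") // illustrative names
if err == nil {
	fmt.Println(len(pod.ManagedFields) == 0) // true: trimmed on cache ingestion
}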

@@ -2,69 +2,180 @@ package descheduler

 import (
 	"context"
 	"errors"
 	"fmt"
 	"math/rand"
 	"net/http"
 	"testing"
 	"time"

 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/conversion"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	apiversion "k8s.io/apimachinery/pkg/version"
 	fakediscovery "k8s.io/client-go/discovery/fake"
 	"k8s.io/client-go/informers"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/component-base/featuregate"
 	"k8s.io/klog/v2"
 	"k8s.io/metrics/pkg/apis/metrics/v1beta1"
 	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
 	fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
 	utilptr "k8s.io/utils/ptr"

 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	"sigs.k8s.io/descheduler/pkg/features"
 	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
 	"sigs.k8s.io/descheduler/pkg/utils"
 	deschedulerversion "sigs.k8s.io/descheduler/pkg/version"
 	"sigs.k8s.io/descheduler/test"
 )

 // scope contains information about an ongoing conversion.
 type scope struct {
 	converter *conversion.Converter
 	meta      *conversion.Meta
 }

 // Convert continues a conversion.
 func (s scope) Convert(src, dest interface{}) error {
 	return s.converter.Convert(src, dest, s.meta)
 }

 // Meta returns the meta object that was originally passed to Convert.
 func (s scope) Meta() *conversion.Meta {
 	return s.meta
 }

-func TestTaintsUpdated(t *testing.T) {
-	pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
-	pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
-	pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
-
-	ctx := context.Background()
-	n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
-	n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
-
-	p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
-	p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
-		{},
-	}
+var (
+	podEvictionError     = errors.New("PodEvictionError")
+	tooManyRequestsError = &apierrors.StatusError{
+		ErrStatus: metav1.Status{
+			Status:  metav1.StatusFailure,
+			Code:    http.StatusTooManyRequests,
+			Reason:  metav1.StatusReasonTooManyRequests,
+			Message: "admission webhook \"virt-launcher-eviction-interceptor.kubevirt.io\" denied the request: Eviction triggered evacuation of VMI",
+		},
+	}
+	nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
+	podsgvr  = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
+)
-	client := fakeclientset.NewSimpleClientset(n1, n2, p1)
-	eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
-	dp := &v1alpha1.DeschedulerPolicy{
-		Strategies: v1alpha1.StrategyList{
-			"RemovePodsViolatingNodeTaints": v1alpha1.DeschedulerStrategy{
-				Enabled: true,
-			},
-		},
-	}
+func initFeatureGates() featuregate.FeatureGate {
+	featureGates := featuregate.NewFeatureGate()
+	featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
+		features.EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
+	})
+	return featureGates
+}
+
+func initPluginRegistry() {
+	pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
+	pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
+	pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
+	pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
+	pluginregistry.Register(nodeutilization.LowNodeUtilizationPluginName, nodeutilization.NewLowNodeUtilization, &nodeutilization.LowNodeUtilization{}, &nodeutilization.LowNodeUtilizationArgs{}, nodeutilization.ValidateLowNodeUtilizationArgs, nodeutilization.SetDefaults_LowNodeUtilizationArgs, pluginregistry.PluginRegistry)
+}
+
+func removePodsViolatingNodeTaintsPolicy() *api.DeschedulerPolicy {
+	return &api.DeschedulerPolicy{
+		Profiles: []api.DeschedulerProfile{
+			{
+				Name: "Profile",
+				PluginConfigs: []api.PluginConfig{
+					{
+						Name: "RemovePodsViolatingNodeTaints",
+						Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{},
+					},
+					{
+						Name: "DefaultEvictor",
+						Args: &defaultevictor.DefaultEvictorArgs{},
+					},
+				},
+				Plugins: api.Plugins{
+					Filter: api.PluginSet{
+						Enabled: []string{
+							"DefaultEvictor",
+						},
+					},
+					Deschedule: api.PluginSet{
+						Enabled: []string{
+							"RemovePodsViolatingNodeTaints",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func removeDuplicatesPolicy() *api.DeschedulerPolicy {
+	return &api.DeschedulerPolicy{
+		Profiles: []api.DeschedulerProfile{
+			{
+				Name: "Profile",
+				PluginConfigs: []api.PluginConfig{
+					{
+						Name: "RemoveDuplicates",
+						Args: &removeduplicates.RemoveDuplicatesArgs{},
+					},
+					{
+						Name: "DefaultEvictor",
+						Args: &defaultevictor.DefaultEvictorArgs{},
+					},
+				},
+				Plugins: api.Plugins{
+					Filter: api.PluginSet{
+						Enabled: []string{
+							"DefaultEvictor",
+						},
+					},
+					Balance: api.PluginSet{
+						Enabled: []string{
+							"RemoveDuplicates",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThresholds, metricsEnabled bool) *api.DeschedulerPolicy {
+	return &api.DeschedulerPolicy{
+		Profiles: []api.DeschedulerProfile{
+			{
+				Name: "Profile",
+				PluginConfigs: []api.PluginConfig{
+					{
+						Name: nodeutilization.LowNodeUtilizationPluginName,
+						Args: &nodeutilization.LowNodeUtilizationArgs{
+							Thresholds:       thresholds,
+							TargetThresholds: targetThresholds,
+							MetricsUtilization: nodeutilization.MetricsUtilization{
+								MetricsServer: metricsEnabled,
+							},
+						},
+					},
+					{
+						Name: defaultevictor.PluginName,
+						Args: &defaultevictor.DefaultEvictorArgs{},
+					},
+				},
+				Plugins: api.Plugins{
+					Filter: api.PluginSet{
+						Enabled: []string{
+							defaultevictor.PluginName,
+						},
+					},
+					Balance: api.PluginSet{
+						Enabled: []string{
+							nodeutilization.LowNodeUtilizationPluginName,
+						},
+					},
+				},
+			},
+		},
+	}
+}

+func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
+	client := fakeclientset.NewSimpleClientset(objects...)
+	eventClient := fakeclientset.NewSimpleClientset(objects...)
+
 	rs, err := options.NewDeschedulerServer()
 	if err != nil {
@@ -72,6 +183,44 @@ func TestTaintsUpdated(t *testing.T) {
 	}
 	rs.Client = client
 	rs.EventClient = eventClient
+	rs.DefaultFeatureGates = featureGates
+	rs.MetricsClient = metricsClient
+
+	sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
+	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
+
+	descheduler, err := newDescheduler(ctx, rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory)
+	if err != nil {
+		eventBroadcaster.Shutdown()
+		t.Fatalf("Unable to create a descheduler instance: %v", err)
+	}
+
+	sharedInformerFactory.Start(ctx.Done())
+	sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+	return rs, descheduler, client
+}
+
+func TestTaintsUpdated(t *testing.T) {
+	initPluginRegistry()
+
+	ctx := context.Background()
+	n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
+	n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
+
+	p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
+	p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
+
+	client := fakeclientset.NewSimpleClientset(n1, n2, p1)
+	eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
+
+	rs, err := options.NewDeschedulerServer()
+	if err != nil {
+		t.Fatalf("Unable to initialize server: %v", err)
+	}
+	rs.Client = client
+	rs.EventClient = eventClient
+	rs.DefaultFeatureGates = initFeatureGates()

 	pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
 	if err != nil {
@@ -95,16 +244,9 @@ func TestTaintsUpdated(t *testing.T) {
 	}

 	var evictedPods []string
-	client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
+	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))

-	internalDeschedulerPolicy := &api.DeschedulerPolicy{}
-	scope := scope{}
-	err = v1alpha1.V1alpha1ToInternal(dp, pluginregistry.PluginRegistry, internalDeschedulerPolicy, scope)
-	if err != nil {
-		t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
-	}
-
-	if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
+	if err := RunDeschedulerStrategies(ctx, rs, removePodsViolatingNodeTaintsPolicy(), "v1"); err != nil {
 		t.Fatalf("Unable to run descheduler strategies: %v", err)
 	}

@@ -114,9 +256,7 @@ func TestTaintsUpdated(t *testing.T) {
 }

 func TestDuplicate(t *testing.T) {
-	pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
-	pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
-	pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
+	initPluginRegistry()

 	ctx := context.Background()
 	node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
@@ -136,13 +276,6 @@ func TestDuplicate(t *testing.T) {

 	client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
 	eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
-	dp := &v1alpha1.DeschedulerPolicy{
-		Strategies: v1alpha1.StrategyList{
-			"RemoveDuplicates": v1alpha1.DeschedulerStrategy{
-				Enabled: true,
-			},
-		},
-	}

 	rs, err := options.NewDeschedulerServer()
 	if err != nil {
@@ -150,6 +283,7 @@ func TestDuplicate(t *testing.T) {
 	}
 	rs.Client = client
 	rs.EventClient = eventClient
+	rs.DefaultFeatureGates = initFeatureGates()

 	pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
 	if err != nil {
@@ -161,20 +295,14 @@ func TestDuplicate(t *testing.T) {
 	}

 	var evictedPods []string
-	client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
+	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))

-	internalDeschedulerPolicy := &api.DeschedulerPolicy{}
-	scope := scope{}
-	err = v1alpha1.V1alpha1ToInternal(dp, pluginregistry.PluginRegistry, internalDeschedulerPolicy, scope)
-	if err != nil {
-		t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
-	}
-	if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
+	if err := RunDeschedulerStrategies(ctx, rs, removeDuplicatesPolicy(), "v1"); err != nil {
 		t.Fatalf("Unable to run descheduler strategies: %v", err)
 	}

 	if len(evictedPods) == 0 {
-		t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
+		t.Fatalf("Unable to evict pods\n")
 	}
 }

@@ -195,6 +323,7 @@ func TestRootCancel(t *testing.T) {
 	rs.Client = client
 	rs.EventClient = eventClient
 	rs.DeschedulingInterval = 100 * time.Millisecond
+	rs.DefaultFeatureGates = initFeatureGates()
 	errChan := make(chan error, 1)
 	defer close(errChan)

@@ -230,6 +359,7 @@ func TestRootCancelWithNoInterval(t *testing.T) {
 	rs.Client = client
 	rs.EventClient = eventClient
 	rs.DeschedulingInterval = 0
+	rs.DefaultFeatureGates = initFeatureGates()
 	errChan := make(chan error, 1)
 	defer close(errChan)

@@ -251,44 +381,44 @@ func TestRootCancelWithNoInterval(t *testing.T) {
 func TestValidateVersionCompatibility(t *testing.T) {
 	type testCase struct {
 		name               string
-		deschedulerVersion string
+		deschedulerVersion deschedulerversion.Info
 		serverVersion      string
 		expectError        bool
 	}
 	testCases := []testCase{
 		{
 			name:               "no error when descheduler minor equals to server minor",
-			deschedulerVersion: "v0.26",
+			deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "26"},
 			serverVersion:      "v1.26.1",
 			expectError:        false,
 		},
 		{
 			name:               "no error when descheduler minor is 3 behind server minor",
-			deschedulerVersion: "0.23",
+			deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "23"},
 			serverVersion:      "v1.26.1",
 			expectError:        false,
 		},
 		{
 			name:               "no error when descheduler minor is 3 ahead of server minor",
-			deschedulerVersion: "v0.26",
+			deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "26"},
 			serverVersion:      "v1.26.1",
 			expectError:        false,
 		},
 		{
 			name:               "error when descheduler minor is 4 behind server minor",
-			deschedulerVersion: "v0.22",
+			deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "22"},
 			serverVersion:      "v1.26.1",
 			expectError:        true,
 		},
 		{
 			name:               "error when descheduler minor is 4 ahead of server minor",
-			deschedulerVersion: "v0.27",
+			deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "27"},
 			serverVersion:      "v1.23.1",
 			expectError:        true,
 		},
 		{
 			name:               "no error when using managed provider version",
-			deschedulerVersion: "v0.25",
+			deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "25"},
 			serverVersion:      "v1.25.12-eks-2d98532",
 			expectError:        false,
 		},
@@ -298,8 +428,7 @@ func TestValidateVersionCompatibility(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			fakeDiscovery.FakedServerVersion = &apiversion.Info{GitVersion: tc.serverVersion}
-			deschedulerVersion := deschedulerversion.Info{GitVersion: tc.deschedulerVersion}
-			err := validateVersionCompatibility(fakeDiscovery, deschedulerVersion)
+			err := validateVersionCompatibility(fakeDiscovery, tc.deschedulerVersion)

 			hasError := err != nil
 			if tc.expectError != hasError {
@@ -309,7 +438,7 @@ func TestValidateVersionCompatibility(t *testing.T) {
 	}
 }

-func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
+func podEvictionReactionTestingFnc(evictedPods *[]string, isEvictionsInBackground func(podName string) bool, evictionErr error) func(action core.Action) (bool, runtime.Object, error) {
 	return func(action core.Action) (bool, runtime.Object, error) {
 		if action.GetSubresource() == "eviction" {
 			createAct, matched := action.(core.CreateActionImpl)
@@ -317,9 +446,420 @@ func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (boo
 				return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
 			}
 			if eviction, matched := createAct.Object.(*policy.Eviction); matched {
+				if isEvictionsInBackground != nil && isEvictionsInBackground(eviction.GetName()) {
+					return true, nil, tooManyRequestsError
+				}
+				if evictionErr != nil {
+					return true, nil, evictionErr
+				}
 				*evictedPods = append(*evictedPods, eviction.GetName())
 				return true, nil, nil
 			}
 		}
 		return false, nil, nil // fallback to the default reactor
 	}
 }
|
||||
|
||||
func taintNodeNoSchedule(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "key",
|
||||
Value: "value",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodEvictorReset(t *testing.T) {
	initPluginRegistry()

	ctx := context.Background()
	node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
	nodes := []*v1.Node{node1, node2}

	ownerRef1 := test.GetReplicaSetOwnerRefList()
	updatePod := func(pod *v1.Pod) {
		pod.Namespace = "dev"
		pod.ObjectMeta.OwnerReferences = ownerRef1
	}

	p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePod)
	p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePod)

	internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
	ctxCancel, cancel := context.WithCancel(ctx)
	rs, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, node1, node2, p1, p2)
	defer cancel()

	var evictedPods []string
	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))

	var fakeEvictedPods []string
	descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
		return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
	}

	// two pod evictions expected per descheduling cycle
	klog.Infof("2 pod evictions expected per descheduling cycle, 2 real evictions in total")
	if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
		t.Fatalf("Unable to run a descheduling loop: %v", err)
	}
	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 || len(fakeEvictedPods) != 0 {
		t.Fatalf("Expected (2,2,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
	}

	// two pod evictions expected per descheduling cycle
	klog.Infof("2 pod evictions expected per descheduling cycle, 4 real evictions in total")
	if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
		t.Fatalf("Unable to run a descheduling loop: %v", err)
	}
	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 || len(fakeEvictedPods) != 0 {
		t.Fatalf("Expected (2,4,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
	}

	// Check that the fake client syncs and the right pods get evicted.
	klog.Infof("Enabling the dry run mode")
	rs.DryRun = true
	evictedPods = []string{}

	klog.Infof("2 pod evictions expected per descheduling cycle, 2 fake evictions in total")
	if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
		t.Fatalf("Unable to run a descheduling loop: %v", err)
	}
	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 2 {
		t.Fatalf("Expected (2,0,2) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
	}

	klog.Infof("2 pod evictions expected per descheduling cycle, 4 fake evictions in total")
	if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
		t.Fatalf("Unable to run a descheduling loop: %v", err)
	}
	if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 4 {
		t.Fatalf("Expected (2,0,4) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
	}
}

func checkTotals(t *testing.T, ctx context.Context, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
	if total := descheduler.podEvictor.TotalEvictionRequests(); total != totalEvictionRequests {
		t.Fatalf("Expected %v total eviction requests, got %v instead", totalEvictionRequests, total)
	}
	if total := descheduler.podEvictor.TotalEvicted(); total != totalEvicted {
		t.Fatalf("Expected %v total evictions, got %v instead", totalEvicted, total)
	}
	t.Logf("Total evictions: %v, total eviction requests: %v, total evictions and eviction requests: %v", totalEvicted, totalEvictionRequests, totalEvicted+totalEvictionRequests)
}

func runDeschedulingCycleAndCheckTotals(t *testing.T, ctx context.Context, nodes []*v1.Node, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
	err := descheduler.runDeschedulerLoop(ctx, nodes)
	if err != nil {
		t.Fatalf("Unable to run a descheduling loop: %v", err)
	}
	checkTotals(t, ctx, descheduler, totalEvictionRequests, totalEvicted)
}

func TestEvictionRequestsCache(t *testing.T) {
	initPluginRegistry()

	ctx := context.Background()
	node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
	nodes := []*v1.Node{node1, node2}

	ownerRef1 := test.GetReplicaSetOwnerRefList()
	updatePod := func(pod *v1.Pod) {
		pod.Namespace = "dev"
		pod.ObjectMeta.OwnerReferences = ownerRef1
		pod.Status.Phase = v1.PodRunning
	}
	updatePodWithEvictionInBackground := func(pod *v1.Pod) {
		updatePod(pod)
		pod.Annotations = map[string]string{
			evictions.EvictionRequestAnnotationKey: "",
		}
	}

	p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
	p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
	p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
	p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
	p5 := test.BuildTestPod("p5", 100, 0, node1.Name, updatePod)

	internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
	ctxCancel, cancel := context.WithCancel(ctx)
	featureGates := featuregate.NewFeatureGate()
	featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
	})
	_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, node1, node2, p1, p2, p3, p4)
	defer cancel()

	var fakeEvictedPods []string
	descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
		return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
	}

	var evictedPods []string
	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))

	klog.Infof("2 evictions in background expected, 2 normal evictions")
	runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)

	klog.Infof("Repeat the previous cycle to confirm no more evictions in background are requested")
	// No evicted pod is actually deleted on purpose so the test can run the descheduling cycle repeatedly
	// without recreating the pods.
	runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)

	klog.Infof("Scenario: Eviction in background got initiated")
	p2.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
	if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
		t.Fatalf("unable to update a pod: %v", err)
	}
	time.Sleep(100 * time.Millisecond)

	klog.Infof("Repeat the previous cycle to confirm no more evictions in background are requested")
	runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)

	klog.Infof("Scenario: Another eviction in background got initiated")
	p1.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
	if _, err := client.CoreV1().Pods(p1.Namespace).Update(context.TODO(), p1, metav1.UpdateOptions{}); err != nil {
		t.Fatalf("unable to update a pod: %v", err)
	}
	time.Sleep(100 * time.Millisecond)

	klog.Infof("Repeat the previous cycle to confirm no more evictions in background are requested")
	runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)

	klog.Infof("Scenario: Eviction in background completed")
	if err := client.CoreV1().Pods(p1.Namespace).Delete(context.TODO(), p1.Name, metav1.DeleteOptions{}); err != nil {
		t.Fatalf("unable to delete a pod: %v", err)
	}
	time.Sleep(100 * time.Millisecond)

	klog.Infof("Check the number of evictions in background decreased")
	runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 2)

	klog.Infof("Scenario: A new pod without eviction in background added")
	if _, err := client.CoreV1().Pods(p5.Namespace).Create(context.TODO(), p5, metav1.CreateOptions{}); err != nil {
		t.Fatalf("unable to create a pod: %v", err)
	}
	time.Sleep(100 * time.Millisecond)

	klog.Infof("Check the number of evictions increased after running a descheduling cycle")
	runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)

	klog.Infof("Scenario: Eviction in background canceled => eviction in progress annotation removed")
	delete(p2.Annotations, evictions.EvictionInProgressAnnotationKey)
	if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
		t.Fatalf("unable to update a pod: %v", err)
	}
	time.Sleep(100 * time.Millisecond)

	klog.Infof("Check the number of evictions in background decreased")
	checkTotals(t, ctx, descheduler, 0, 3)

	klog.Infof("Scenario: Re-run the descheduling cycle to re-request eviction in background")
	runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)

	klog.Infof("Scenario: Eviction in background completed with a pod in completed state")
	p2.Status.Phase = v1.PodSucceeded
	if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
		t.Fatalf("unable to update a pod: %v", err)
	}
	time.Sleep(100 * time.Millisecond)

	klog.Infof("Check the number of evictions in background decreased")
	runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 0, 3)
}

func TestDeschedulingLimits(t *testing.T) {
	initPluginRegistry()

	tests := []struct {
		description string
		policy      *api.DeschedulerPolicy
		limit       uint
	}{
		{
			description: "limits per node",
			policy: func() *api.DeschedulerPolicy {
				policy := removePodsViolatingNodeTaintsPolicy()
				policy.MaxNoOfPodsToEvictPerNode = utilptr.To[uint](4)
				return policy
			}(),
			limit: uint(4),
		},
		{
			description: "limits per namespace",
			policy: func() *api.DeschedulerPolicy {
				policy := removePodsViolatingNodeTaintsPolicy()
				policy.MaxNoOfPodsToEvictPerNamespace = utilptr.To[uint](4)
				return policy
			}(),
			limit: uint(4),
		},
		{
			description: "limits per cycle",
			policy: func() *api.DeschedulerPolicy {
				policy := removePodsViolatingNodeTaintsPolicy()
				policy.MaxNoOfPodsToEvictTotal = utilptr.To[uint](4)
				return policy
			}(),
			limit: uint(4),
		},
	}

	ownerRef1 := test.GetReplicaSetOwnerRefList()
	updatePod := func(pod *v1.Pod) {
		pod.Namespace = "dev"
		pod.ObjectMeta.OwnerReferences = ownerRef1
	}

	updatePodWithEvictionInBackground := func(pod *v1.Pod) {
		updatePod(pod)
		pod.Annotations = map[string]string{
			evictions.EvictionRequestAnnotationKey: "",
		}
	}

	for _, tc := range tests {
		t.Run(tc.description, func(t *testing.T) {
			ctx := context.Background()
			node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
			node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
			nodes := []*v1.Node{node1, node2}
			ctxCancel, cancel := context.WithCancel(ctx)
			featureGates := featuregate.NewFeatureGate()
			featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
				features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
			})
			_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, node1, node2)
			defer cancel()

			var fakeEvictedPods []string
			descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
				return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
			}

			var evictedPods []string
			client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))

			rand.Seed(time.Now().UnixNano())
			pods := []*v1.Pod{
				test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground),
				test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground),
				test.BuildTestPod("p3", 100, 0, node1.Name, updatePod),
				test.BuildTestPod("p4", 100, 0, node1.Name, updatePod),
				test.BuildTestPod("p5", 100, 0, node1.Name, updatePod),
			}

			for i := 0; i < 10; i++ {
				rand.Shuffle(len(pods), func(i, j int) { pods[i], pods[j] = pods[j], pods[i] })
				func() {
					for j := 0; j < 5; j++ {
						idx := j
						if _, err := client.CoreV1().Pods(pods[idx].Namespace).Create(context.TODO(), pods[idx], metav1.CreateOptions{}); err != nil {
							t.Fatalf("unable to create a pod: %v", err)
						}
						defer func() {
							if err := client.CoreV1().Pods(pods[idx].Namespace).Delete(context.TODO(), pods[idx].Name, metav1.DeleteOptions{}); err != nil {
								t.Fatalf("unable to delete a pod: %v", err)
							}
						}()
					}
					time.Sleep(100 * time.Millisecond)

					klog.Infof("2 evictions in background expected, 2 normal evictions")
					err := descheduler.runDeschedulerLoop(ctx, nodes)
					if err != nil {
						t.Fatalf("Unable to run a descheduling loop: %v", err)
					}
					totalERs := descheduler.podEvictor.TotalEvictionRequests()
					totalEs := descheduler.podEvictor.TotalEvicted()
					if totalERs+totalEs > tc.limit {
						t.Fatalf("Expected at most %v evictions and eviction requests in total, got %v instead", tc.limit, totalERs+totalEs)
					}
					t.Logf("Total evictions and eviction requests: %v (er=%v, e=%v)", totalERs+totalEs, totalERs, totalEs)
				}()
			}
		})
	}
}

func TestLoadAwareDescheduling(t *testing.T) {
	initPluginRegistry()

	ownerRef1 := test.GetReplicaSetOwnerRefList()
	updatePod := func(pod *v1.Pod) {
		pod.ObjectMeta.OwnerReferences = ownerRef1
	}

	ctx := context.Background()
	node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
	nodes := []*v1.Node{node1, node2}

	p1 := test.BuildTestPod("p1", 300, 0, node1.Name, updatePod)
	p2 := test.BuildTestPod("p2", 300, 0, node1.Name, updatePod)
	p3 := test.BuildTestPod("p3", 300, 0, node1.Name, updatePod)
	p4 := test.BuildTestPod("p4", 300, 0, node1.Name, updatePod)
	p5 := test.BuildTestPod("p5", 300, 0, node1.Name, updatePod)

	nodemetricses := []*v1beta1.NodeMetrics{
		test.BuildNodeMetrics("n1", 2400, 3000),
		test.BuildNodeMetrics("n2", 400, 0),
	}

	podmetricses := []*v1beta1.PodMetrics{
		test.BuildPodMetrics("p1", 400, 0),
		test.BuildPodMetrics("p2", 400, 0),
		test.BuildPodMetrics("p3", 400, 0),
		test.BuildPodMetrics("p4", 400, 0),
		test.BuildPodMetrics("p5", 400, 0),
	}

	metricsClientset := fakemetricsclient.NewSimpleClientset()
	for _, nodemetrics := range nodemetricses {
		metricsClientset.Tracker().Create(nodesgvr, nodemetrics, "")
	}
	for _, podmetrics := range podmetricses {
		metricsClientset.Tracker().Create(podsgvr, podmetrics, podmetrics.Namespace)
	}

	policy := lowNodeUtilizationPolicy(
		api.ResourceThresholds{
			v1.ResourceCPU:  30,
			v1.ResourcePods: 30,
		},
		api.ResourceThresholds{
			v1.ResourceCPU:  50,
			v1.ResourcePods: 50,
		},
		true, // enable metrics utilization
	)
	policy.MetricsCollector.Enabled = true

	ctxCancel, cancel := context.WithCancel(ctx)
	_, descheduler, _ := initDescheduler(
		t,
		ctxCancel,
		initFeatureGates(),
		policy,
		metricsClientset,
		node1, node2, p1, p2, p3, p4, p5)
	defer cancel()

	// This needs to be run since the metrics collector is started
	// after newDescheduler in RunDeschedulerStrategies.
	descheduler.metricsCollector.Collect(ctx)

	err := descheduler.runDeschedulerLoop(ctx, nodes)
	if err != nil {
		t.Fatalf("Unable to run a descheduling loop: %v", err)
	}
	totalEs := descheduler.podEvictor.TotalEvicted()
	if totalEs != 2 {
		t.Fatalf("Expected %v evictions in total, got %v instead", 2, totalEs)
	}
	t.Logf("Total evictions: %v", totalEs)
}

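// The nodesgvr and podsgvr used with the fake metrics tracker above are
// defined outside this excerpt. A plausible definition (an assumption)
// matching the metrics.k8s.io/v1beta1 API the tests fake:
//
//	import "k8s.io/apimachinery/pkg/runtime/schema"
//
//	var (
//		nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
//		podsgvr  = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
//	)
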
pkg/descheduler/evictions/errors.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package evictions

type EvictionNodeLimitError struct {
	node string
}

func (e EvictionNodeLimitError) Error() string {
	return "maximum number of evicted pods per node reached"
}

func NewEvictionNodeLimitError(node string) *EvictionNodeLimitError {
	return &EvictionNodeLimitError{
		node: node,
	}
}

var _ error = &EvictionNodeLimitError{}

type EvictionNamespaceLimitError struct {
	namespace string
}

func (e EvictionNamespaceLimitError) Error() string {
	return "maximum number of evicted pods per namespace reached"
}

func NewEvictionNamespaceLimitError(namespace string) *EvictionNamespaceLimitError {
	return &EvictionNamespaceLimitError{
		namespace: namespace,
	}
}

var _ error = &EvictionNamespaceLimitError{}

type EvictionTotalLimitError struct{}

func (e EvictionTotalLimitError) Error() string {
	return "maximum number of evicted pods per a descheduling cycle reached"
}

func NewEvictionTotalLimitError() *EvictionTotalLimitError {
	return &EvictionTotalLimitError{}
}

var _ error = &EvictionTotalLimitError{}
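
// The typed errors above let callers react to specific limits. A minimal
// external sketch (hypothetical caller; only the types above are assumed) of
// telling them apart with the standard errors package:
//
//	import (
//		"errors"
//
//		"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
//	)
//
//	func classifyEvictionErr(err error) string {
//		var nodeLimit *evictions.EvictionNodeLimitError
//		var nsLimit *evictions.EvictionNamespaceLimitError
//		var totalLimit *evictions.EvictionTotalLimitError
//		switch {
//		case err == nil:
//			return "evicted"
//		case errors.As(err, &totalLimit):
//			return "stop the cycle: total eviction limit reached"
//		case errors.As(err, &nodeLimit):
//			return "skip this node: per-node eviction limit reached"
//		case errors.As(err, &nsLimit):
//			return "skip this namespace: per-namespace eviction limit reached"
//		default:
//			return "eviction failed"
//		}
//	}
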
@@ -19,6 +19,9 @@ package evictions
import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
@@ -26,15 +29,176 @@ import (
	policy "k8s.io/api/policy/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/events"
	"k8s.io/component-base/featuregate"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/metrics"
	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
	"sigs.k8s.io/descheduler/pkg/features"
	"sigs.k8s.io/descheduler/pkg/tracing"
)

var (
	assumedEvictionRequestTimeoutSeconds uint          = 10 * 60 // 10 minutes
	evictionRequestsCacheResyncPeriod    time.Duration = 10 * time.Minute
	// syncedPollPeriod controls how often the status of the sync funcs is checked
	syncedPollPeriod = 100 * time.Millisecond
)

type evictionRequestItem struct {
	podName, podNamespace, podNodeName string
	evictionAssumed                    bool
	assumedTimestamp                   metav1.Time
}

type evictionRequestsCache struct {
	mu                           sync.RWMutex
	requests                     map[string]evictionRequestItem
	requestsPerNode              map[string]uint
	requestsPerNamespace         map[string]uint
	requestsTotal                uint
	assumedRequestTimeoutSeconds uint
}

func newEvictionRequestsCache(assumedRequestTimeoutSeconds uint) *evictionRequestsCache {
	return &evictionRequestsCache{
		requests:                     make(map[string]evictionRequestItem),
		requestsPerNode:              make(map[string]uint),
		requestsPerNamespace:         make(map[string]uint),
		assumedRequestTimeoutSeconds: assumedRequestTimeoutSeconds,
	}
}

func (erc *evictionRequestsCache) run(ctx context.Context) {
	wait.UntilWithContext(ctx, erc.cleanCache, evictionRequestsCacheResyncPeriod)
}

// cleanCache removes all assumed entries that have not been confirmed
// for more than the specified timeout
func (erc *evictionRequestsCache) cleanCache(ctx context.Context) {
	erc.mu.Lock()
	defer erc.mu.Unlock()
	klog.V(4).Infof("Cleaning cache of assumed eviction requests in background")
	for uid, item := range erc.requests {
		if item.evictionAssumed {
			requestAgeSeconds := uint(metav1.Now().Sub(item.assumedTimestamp.Local()).Seconds())
			if requestAgeSeconds > erc.assumedRequestTimeoutSeconds {
				klog.V(4).InfoS("Assumed eviction request in background timed out, deleting", "timeout", erc.assumedRequestTimeoutSeconds, "podNamespace", item.podNamespace, "podName", item.podName)
				erc.deleteItem(uid)
			}
		}
	}
}

func (erc *evictionRequestsCache) evictionRequestsPerNode(nodeName string) uint {
	erc.mu.RLock()
	defer erc.mu.RUnlock()
	return erc.requestsPerNode[nodeName]
}

func (erc *evictionRequestsCache) evictionRequestsPerNamespace(ns string) uint {
	erc.mu.RLock()
	defer erc.mu.RUnlock()
	return erc.requestsPerNamespace[ns]
}

func (erc *evictionRequestsCache) evictionRequestsTotal() uint {
	erc.mu.RLock()
	defer erc.mu.RUnlock()
	return erc.requestsTotal
}

func (erc *evictionRequestsCache) TotalEvictionRequests() uint {
	erc.mu.RLock()
	defer erc.mu.RUnlock()
	return uint(len(erc.requests))
}

// getPodKey returns the string key of a pod.
func getPodKey(pod *v1.Pod) string {
	uid := string(pod.UID)
	// Every pod is expected to have the UID set.
	// When the descheduling framework is used for simulation,
	// user-created workloads may omit the UID.
	if len(uid) == 0 {
		panic(fmt.Errorf("cannot get cache key for %v/%v pod with empty UID", pod.Namespace, pod.Name))
	}
	return uid
}

func (erc *evictionRequestsCache) addPod(pod *v1.Pod) {
	erc.mu.Lock()
	defer erc.mu.Unlock()
	uid := getPodKey(pod)
	if _, exists := erc.requests[uid]; exists {
		return
	}
	erc.requests[uid] = evictionRequestItem{podNamespace: pod.Namespace, podName: pod.Name, podNodeName: pod.Spec.NodeName}
	erc.requestsPerNode[pod.Spec.NodeName]++
	erc.requestsPerNamespace[pod.Namespace]++
	erc.requestsTotal++
}

func (erc *evictionRequestsCache) assumePod(pod *v1.Pod) {
	erc.mu.Lock()
	defer erc.mu.Unlock()
	uid := getPodKey(pod)
	if _, exists := erc.requests[uid]; exists {
		return
	}
	erc.requests[uid] = evictionRequestItem{
		podNamespace:     pod.Namespace,
		podName:          pod.Name,
		podNodeName:      pod.Spec.NodeName,
		evictionAssumed:  true,
		assumedTimestamp: metav1.NewTime(time.Now()),
	}
	erc.requestsPerNode[pod.Spec.NodeName]++
	erc.requestsPerNamespace[pod.Namespace]++
	erc.requestsTotal++
}

// no locking, expected to be invoked from protected methods only
func (erc *evictionRequestsCache) deleteItem(uid string) {
	erc.requestsPerNode[erc.requests[uid].podNodeName]--
	if erc.requestsPerNode[erc.requests[uid].podNodeName] == 0 {
		delete(erc.requestsPerNode, erc.requests[uid].podNodeName)
	}
	erc.requestsPerNamespace[erc.requests[uid].podNamespace]--
	if erc.requestsPerNamespace[erc.requests[uid].podNamespace] == 0 {
		delete(erc.requestsPerNamespace, erc.requests[uid].podNamespace)
	}
	erc.requestsTotal--
	delete(erc.requests, uid)
}

func (erc *evictionRequestsCache) deletePod(pod *v1.Pod) {
	erc.mu.Lock()
	defer erc.mu.Unlock()
	uid := getPodKey(pod)
	if _, exists := erc.requests[uid]; exists {
		erc.deleteItem(uid)
	}
}

func (erc *evictionRequestsCache) hasPod(pod *v1.Pod) bool {
	erc.mu.RLock()
	defer erc.mu.RUnlock()
	uid := getPodKey(pod)
	_, exists := erc.requests[uid]
	return exists
}

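// A minimal in-package sketch of the cache bookkeeping above; the pod name,
// namespace, node, and UID are made up for illustration (getPodKey panics on
// an empty UID).
//
//	func exampleEvictionRequestsCacheUsage() {
//		cache := newEvictionRequestsCache(600) // assumed-request timeout of 10 minutes
//		pod := &v1.Pod{
//			ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "dev", UID: "uid-1"},
//			Spec:       v1.PodSpec{NodeName: "n1"},
//		}
//
//		cache.assumePod(pod)                          // counted as a pending eviction request
//		_ = cache.evictionRequestsPerNode("n1")       // 1
//		_ = cache.evictionRequestsPerNamespace("dev") // 1
//
//		cache.deletePod(pod)              // e.g. the pod got deleted or lost its annotation
//		_ = cache.evictionRequestsTotal() // 0
//	}
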
var (
	EvictionRequestAnnotationKey    = "descheduler.alpha.kubernetes.io/request-evict-only"
	EvictionInProgressAnnotationKey = "descheduler.alpha.kubernetes.io/eviction-in-progress"
	EvictionInBackgroundErrorText   = "Eviction triggered evacuation"
)
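
// The request-evict-only annotation is what opts a pod into eviction in
// background (the tests above set it to an empty value). A hypothetical
// helper sketch:
//
//	func markForEvictionInBackground(pod *v1.Pod) {
//		if pod.Annotations == nil {
//			pod.Annotations = map[string]string{}
//		}
//		pod.Annotations[EvictionRequestAnnotationKey] = ""
//		// An external eviction controller is expected to set
//		// EvictionInProgressAnnotationKey once it starts acting on the request.
//	}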

// nodePodEvictedCount keeps count of pods evicted per node
type (
	nodePodEvictedCount map[string]uint
@@ -42,151 +206,368 @@ type (
)

type PodEvictor struct {
	client                     clientset.Interface
	nodes                      []*v1.Node
	policyGroupVersion         string
	dryRun                     bool
	maxPodsToEvictPerNode      *uint
	maxPodsToEvictPerNamespace *uint
	nodepodCount               nodePodEvictedCount
	namespacePodCount          namespacePodEvictCount
	metricsEnabled             bool
	eventRecorder              events.EventRecorder
	mu                         sync.RWMutex
	client                           clientset.Interface
	policyGroupVersion               string
	dryRun                           bool
	evictionFailureEventNotification bool
	maxPodsToEvictPerNode            *uint
	maxPodsToEvictPerNamespace       *uint
	maxPodsToEvictTotal              *uint
	nodePodCount                     nodePodEvictedCount
	namespacePodCount                namespacePodEvictCount
	totalPodCount                    uint
	metricsEnabled                   bool
	eventRecorder                    events.EventRecorder
	erCache                          *evictionRequestsCache
	featureGates                     featuregate.FeatureGate

	// registeredHandlers contains the registrations of all handlers. It's used to check if all handlers have finished syncing before the scheduling cycles start.
	registeredHandlers []cache.ResourceEventHandlerRegistration
}

func NewPodEvictor(
	ctx context.Context,
	client clientset.Interface,
	policyGroupVersion string,
	dryRun bool,
	maxPodsToEvictPerNode *uint,
	maxPodsToEvictPerNamespace *uint,
	nodes []*v1.Node,
	metricsEnabled bool,
	eventRecorder events.EventRecorder,
) *PodEvictor {
	nodePodCount := make(nodePodEvictedCount)
	namespacePodCount := make(namespacePodEvictCount)
	for _, node := range nodes {
		// Initialize podsEvicted till now with 0.
		nodePodCount[node.Name] = 0
	podInformer cache.SharedIndexInformer,
	featureGates featuregate.FeatureGate,
	options *Options,
) (*PodEvictor, error) {
	if options == nil {
		options = NewOptions()
	}

	return &PodEvictor{
		client:                     client,
		nodes:                      nodes,
		policyGroupVersion:         policyGroupVersion,
		dryRun:                     dryRun,
		maxPodsToEvictPerNode:      maxPodsToEvictPerNode,
		maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
		nodepodCount:               nodePodCount,
		namespacePodCount:          namespacePodCount,
		metricsEnabled:             metricsEnabled,
		eventRecorder:              eventRecorder,
	podEvictor := &PodEvictor{
		client:                           client,
		eventRecorder:                    eventRecorder,
		policyGroupVersion:               options.policyGroupVersion,
		dryRun:                           options.dryRun,
		evictionFailureEventNotification: options.evictionFailureEventNotification,
		maxPodsToEvictPerNode:            options.maxPodsToEvictPerNode,
		maxPodsToEvictPerNamespace:       options.maxPodsToEvictPerNamespace,
		maxPodsToEvictTotal:              options.maxPodsToEvictTotal,
		metricsEnabled:                   options.metricsEnabled,
		nodePodCount:                     make(nodePodEvictedCount),
		namespacePodCount:                make(namespacePodEvictCount),
		featureGates:                     featureGates,
	}

	if featureGates.Enabled(features.EvictionsInBackground) {
		erCache := newEvictionRequestsCache(assumedEvictionRequestTimeoutSeconds)

		handlerRegistration, err := podInformer.AddEventHandler(
			cache.ResourceEventHandlerFuncs{
				AddFunc: func(obj interface{}) {
					pod, ok := obj.(*v1.Pod)
					if !ok {
						klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", obj)
						return
					}
					if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
						if _, exists := pod.Annotations[EvictionInProgressAnnotationKey]; exists {
							// Ignore completed/succeeded or failed pods
							if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
								klog.V(3).InfoS("Eviction in background detected. Adding pod to the cache.", "pod", klog.KObj(pod))
								erCache.addPod(pod)
							}
						}
					}
				},
				UpdateFunc: func(oldObj, newObj interface{}) {
					oldPod, ok := oldObj.(*v1.Pod)
					if !ok {
						klog.ErrorS(nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj)
						return
					}
					newPod, ok := newObj.(*v1.Pod)
					if !ok {
						klog.ErrorS(nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj)
						return
					}
					// Ignore pods that are not subject to an eviction in background
					if _, exists := newPod.Annotations[EvictionRequestAnnotationKey]; !exists {
						if erCache.hasPod(newPod) {
							klog.V(3).InfoS("Pod with eviction in background lost annotation. Removing pod from the cache.", "pod", klog.KObj(newPod))
						}
						erCache.deletePod(newPod)
						return
					}
					// Remove completed/succeeded or failed pods from the cache
					if newPod.Status.Phase == v1.PodSucceeded || newPod.Status.Phase == v1.PodFailed {
						klog.V(3).InfoS("Pod with eviction in background completed. Removing pod from the cache.", "pod", klog.KObj(newPod))
						erCache.deletePod(newPod)
						return
					}
					// Ignore any pod that does not have eviction in progress
					if _, exists := newPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
						// In case the EvictionInProgressAnnotationKey annotation is not present/removed,
						// it's unclear whether the eviction was restarted or terminated.
						// If the eviction gets restarted, the pod needs to be removed from the cache
						// to allow re-triggering the eviction.
						if _, exists := oldPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
							return
						}
						// The annotation was removed -> remove the pod from the cache so the
						// eviction can be requested again. In case the eviction got restarted,
						// requesting the eviction again is expected to be a no-op. In case the
						// eviction got terminated with no retry, requesting a new eviction is a
						// normal operation.
						klog.V(3).InfoS("Eviction in background canceled (annotation removed). Removing pod from the cache.", "annotation", EvictionInProgressAnnotationKey, "pod", klog.KObj(newPod))
						erCache.deletePod(newPod)
						return
					}
					// Pick up the eviction in progress
					if !erCache.hasPod(newPod) {
						klog.V(3).InfoS("Eviction in background detected. Updating the cache.", "pod", klog.KObj(newPod))
					}
					erCache.addPod(newPod)
				},
				DeleteFunc: func(obj interface{}) {
					var pod *v1.Pod
					switch t := obj.(type) {
					case *v1.Pod:
						pod = t
					case cache.DeletedFinalStateUnknown:
						var ok bool
						pod, ok = t.Obj.(*v1.Pod)
						if !ok {
							klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
							return
						}
					default:
						klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t)
						return
					}
					if erCache.hasPod(pod) {
						klog.V(3).InfoS("Pod with eviction in background deleted/evicted. Removing pod from the cache.", "pod", klog.KObj(pod))
					}
					erCache.deletePod(pod)
				},
			},
		)
		if err != nil {
			return nil, fmt.Errorf("unable to register event handler for pod evictor: %v", err)
		}

		podEvictor.registeredHandlers = append(podEvictor.registeredHandlers, handlerRegistration)

		go erCache.run(ctx)

		podEvictor.erCache = erCache
	}

	return podEvictor, nil
}

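// A minimal in-package sketch (hypothetical caller) of wiring a PodEvictor
// and waiting for its event handlers before the first descheduling cycle;
// the informers import is assumed: "k8s.io/client-go/informers".
//
//	func setUpPodEvictorSketch(
//		ctx context.Context,
//		client clientset.Interface,
//		eventRecorder events.EventRecorder,
//		factory informers.SharedInformerFactory,
//		featureGates featuregate.FeatureGate,
//	) (*PodEvictor, error) {
//		podEvictor, err := NewPodEvictor(
//			ctx,
//			client,
//			eventRecorder,
//			factory.Core().V1().Pods().Informer(),
//			featureGates,
//			NewOptions(), // defaults; see the With... builders used in the tests below
//		)
//		if err != nil {
//			return nil, err
//		}
//		factory.Start(ctx.Done())
//		factory.WaitForCacheSync(ctx.Done())
//		if err := podEvictor.WaitForEventHandlersSync(ctx); err != nil {
//			return nil, err
//		}
//		return podEvictor, nil
//	}
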
// WaitForEventHandlersSync waits for EventHandlers to sync.
// It returns nil if it was successful, an error if the controller should shut down
func (pe *PodEvictor) WaitForEventHandlersSync(ctx context.Context) error {
	return wait.PollUntilContextCancel(ctx, syncedPollPeriod, true, func(ctx context.Context) (done bool, err error) {
		for _, handler := range pe.registeredHandlers {
			if !handler.HasSynced() {
				return false, nil
			}
		}
		return true, nil
	})
}

// NodeEvicted gives the number of pods evicted from a node
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
	return pe.nodepodCount[node.Name]
	pe.mu.RLock()
	defer pe.mu.RUnlock()
	return pe.nodePodCount[node.Name]
}

// TotalEvicted gives the total number of pods evicted across all nodes
func (pe *PodEvictor) TotalEvicted() uint {
	var total uint
	for _, count := range pe.nodepodCount {
		total += count
	}
	return total
	pe.mu.RLock()
	defer pe.mu.RUnlock()
	return pe.totalPodCount
}

// NodeLimitExceeded checks if the number of evictions for a node was exceeded
func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
	if pe.maxPodsToEvictPerNode != nil {
		return pe.nodepodCount[node.Name] == *pe.maxPodsToEvictPerNode
func (pe *PodEvictor) ResetCounters() {
	pe.mu.Lock()
	defer pe.mu.Unlock()
	pe.nodePodCount = make(nodePodEvictedCount)
	pe.namespacePodCount = make(namespacePodEvictCount)
	pe.totalPodCount = 0
}

func (pe *PodEvictor) SetClient(client clientset.Interface) {
	pe.mu.Lock()
	defer pe.mu.Unlock()
	pe.client = client
}

func (pe *PodEvictor) evictionRequestsTotal() uint {
	if pe.featureGates.Enabled(features.EvictionsInBackground) {
		return pe.erCache.evictionRequestsTotal()
	} else {
		return 0
	}
}

func (pe *PodEvictor) evictionRequestsPerNode(node string) uint {
	if pe.featureGates.Enabled(features.EvictionsInBackground) {
		return pe.erCache.evictionRequestsPerNode(node)
	} else {
		return 0
	}
}

func (pe *PodEvictor) evictionRequestsPerNamespace(ns string) uint {
	if pe.featureGates.Enabled(features.EvictionsInBackground) {
		return pe.erCache.evictionRequestsPerNamespace(ns)
	} else {
		return 0
	}
}

func (pe *PodEvictor) EvictionRequests(node *v1.Node) uint {
	pe.mu.RLock()
	defer pe.mu.RUnlock()
	return pe.evictionRequestsTotal()
}

func (pe *PodEvictor) TotalEvictionRequests() uint {
	pe.mu.RLock()
	defer pe.mu.RUnlock()
	if pe.featureGates.Enabled(features.EvictionsInBackground) {
		return pe.erCache.TotalEvictionRequests()
	} else {
		return 0
	}
	return false
}

// EvictOptions provides a handle for passing additional info to EvictPod
type EvictOptions struct {
	// Reason allows for passing details about the specific eviction for logging.
	Reason string
	// ProfileName allows for passing details about profile for observability.
	ProfileName string
	// StrategyName allows for passing details about strategy for observability.
	StrategyName string
}

// EvictPod evicts a pod while exercising eviction limits.
// Returns true when the pod is evicted on the server side.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) error {
	if len(pod.UID) == 0 {
		klog.InfoS("Ignoring pod eviction due to missing UID", "pod", pod)
		return fmt.Errorf("Pod %v is missing UID", klog.KObj(pod))
	}

	if pe.featureGates.Enabled(features.EvictionsInBackground) {
		// eviction in background requested
		if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
			if pe.erCache.hasPod(pod) {
				klog.V(3).InfoS("Eviction in background already requested (ignoring)", "pod", klog.KObj(pod))
				return nil
			}
		}
	}

	pe.mu.Lock()
	defer pe.mu.Unlock()

	var span trace.Span
	ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
	defer span.End()
	// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
	strategy := ""
	if ctx.Value("strategyName") != nil {
		strategy = ctx.Value("strategyName").(string)

	if pe.maxPodsToEvictTotal != nil && pe.totalPodCount+pe.evictionRequestsTotal()+1 > *pe.maxPodsToEvictTotal {
		err := NewEvictionTotalLimitError()
		if pe.metricsEnabled {
			metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
		}
		span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
		klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal)
		if pe.evictionFailureEventNotification {
			pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictTotal)
		}
		return err
	}

	if pod.Spec.NodeName != "" {
		if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
		if pe.maxPodsToEvictPerNode != nil && pe.nodePodCount[pod.Spec.NodeName]+pe.evictionRequestsPerNode(pod.Spec.NodeName)+1 > *pe.maxPodsToEvictPerNode {
			err := NewEvictionNodeLimitError(pod.Spec.NodeName)
			if pe.metricsEnabled {
				metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
				metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
			}
			span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per node reached")))
			klog.ErrorS(fmt.Errorf("maximum number of evicted pods per node reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
			return false
			span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
			klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
			if pe.evictionFailureEventNotification {
				pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNode)
			}
			return err
		}
	}

	if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
	if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+pe.evictionRequestsPerNamespace(pod.Namespace)+1 > *pe.maxPodsToEvictPerNamespace {
		err := NewEvictionNamespaceLimitError(pod.Namespace)
		if pe.metricsEnabled {
			metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
			metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
		}
		span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per namespace reached")))
		klog.ErrorS(fmt.Errorf("maximum number of evicted pods per namespace reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
		return false
		span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
		klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace, "pod", klog.KObj(pod))
		if pe.evictionFailureEventNotification {
			pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNamespace)
		}
		return err
	}

	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
	ignore, err := pe.evictPod(ctx, pod)
	if err != nil {
		// err is used only for logging purposes
		span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
		klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
		if pe.metricsEnabled {
			metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
			metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
		}
		return false
		if pe.evictionFailureEventNotification {
			pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: %v", pod.Spec.NodeName, err.Error())
		}
		return err
	}

	if ignore {
		return nil
	}

	if pod.Spec.NodeName != "" {
		pe.nodepodCount[pod.Spec.NodeName]++
		pe.nodePodCount[pod.Spec.NodeName]++
	}
	pe.namespacePodCount[pod.Namespace]++
	pe.totalPodCount++

	if pe.metricsEnabled {
		metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
		metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
	}

	if pe.dryRun {
		klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
		klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.StrategyName, "node", pod.Spec.NodeName, "profile", opts.ProfileName)
	} else {
		klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
		klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.StrategyName, "node", pod.Spec.NodeName, "profile", opts.ProfileName)
		reason := opts.Reason
		if len(reason) == 0 {
			reason = strategy
			reason = opts.StrategyName
			if len(reason) == 0 {
				reason = "NotSet"
			}
		}
		pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
		pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
	}
	return true
	return nil
}

func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
// return (ignore, err)
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
	deleteOptions := &metav1.DeleteOptions{}
	// GracePeriodSeconds ?
	eviction := &policy.Eviction{
		TypeMeta: metav1.TypeMeta{
			APIVersion: policyGroupVersion,
			APIVersion: pe.policyGroupVersion,
			Kind:       eutils.EvictionKind,
		},
		ObjectMeta: metav1.ObjectMeta{
@@ -195,13 +576,36 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
		},
		DeleteOptions: deleteOptions,
	}
	err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
	err := pe.client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
	if err == nil {
		return false, nil
	}
	if pe.featureGates.Enabled(features.EvictionsInBackground) {
		// eviction in background requested
		if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
			// Simulating https://github.com/kubevirt/kubevirt/pull/11532/files#diff-059cc1fc09e8b469143348cc3aa80b40de987670e008fa18a6fe010061f973c9R77
			if apierrors.IsTooManyRequests(err) && strings.Contains(err.Error(), EvictionInBackgroundErrorText) {
				// Ignore eviction of any pod that's failed or completed.
				// It can happen that an eviction in background ends up with the pod stuck in the completed state.
				// Normally, any eviction request is expected to end with the pod's deletion.
				// However, some custom eviction policies may leave completed pods around,
				// which would leave all such completed pods counted as unfinished evictions in background.
				if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
					klog.V(3).InfoS("Ignoring eviction of a completed/failed pod", "pod", klog.KObj(pod))
					return true, nil
				}
				klog.V(3).InfoS("Eviction in background assumed", "pod", klog.KObj(pod))
				pe.erCache.assumePod(pod)
				return true, nil
			}
		}
	}

	if apierrors.IsTooManyRequests(err) {
		return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
		return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
	}
	if apierrors.IsNotFound(err) {
		return fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
		return false, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
	}
	return err
	return false, err
}

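// A minimal in-package sketch (hypothetical caller) of a strategy-style loop
// reacting to EvictPod's typed errors; a nil return means the pod was
// evicted, or an eviction in background was requested or assumed.
//
//	func evictCandidatesSketch(ctx context.Context, pe *PodEvictor, candidates []*v1.Pod) {
//		for _, pod := range candidates {
//			err := pe.EvictPod(ctx, pod, EvictOptions{Reason: "example", StrategyName: "ExampleStrategy"})
//			switch err.(type) {
//			case nil:
//				continue // evicted (or assumed in background)
//			case *EvictionTotalLimitError:
//				return // the per-cycle budget is exhausted; stop entirely
//			case *EvictionNodeLimitError, *EvictionNamespaceLimitError:
//				continue // only this node/namespace is saturated; try the next pod
//			default:
//				continue // other eviction failure; move on
//			}
//		}
//	}
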
@@ -18,54 +18,107 @@ package evictions

import (
	"context"
	"fmt"
	"reflect"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/events"
	"k8s.io/component-base/featuregate"
	"k8s.io/klog/v2"
	utilptr "k8s.io/utils/ptr"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/features"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/test"
)

const (
	notFoundText    = "pod not found when evicting \"%s\": pods \"%s\" not found"
	tooManyRequests = "error when evicting pod (ignoring) \"%s\": Too many requests: too many requests"
)

func initFeatureGates() featuregate.FeatureGate {
	featureGates := featuregate.NewFeatureGate()
	featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
	})
	return featureGates
}

func TestEvictPod(t *testing.T) {
	ctx := context.Background()
	node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
	pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
	tests := []struct {
		description string
		node        *v1.Node
		pod         *v1.Pod
		pods        []v1.Pod
		want        error
		evictedPod  *v1.Pod
		pods        []runtime.Object
		wantErr     error
	}{
		{
			description: "test pod eviction - pod present",
			node:        node1,
			pod:         pod1,
			pods:        []v1.Pod{*pod1},
			want:        nil,
			evictedPod:  pod1,
			pods:        []runtime.Object{pod1},
		},
		{
			description: "test pod eviction - pod absent",
			description: "test pod eviction - pod absent (not found error)",
			node:        node1,
			pod:         pod1,
			pods:        []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1", nil), *test.BuildTestPod("p3", 450, 0, "node1", nil)},
			want:        nil,
			evictedPod:  pod1,
			pods:        []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
			wantErr:     fmt.Errorf(notFoundText, pod1.Name, pod1.Name),
		},
		{
			description: "test pod eviction - pod absent (too many requests error)",
			node:        node1,
			evictedPod:  pod1,
			pods:        []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
			wantErr:     fmt.Errorf(tooManyRequests, pod1.Name),
		},
	}

	for _, test := range tests {
		fakeClient := &fake.Clientset{}
		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
			return true, &v1.PodList{Items: test.pods}, nil
		t.Run(test.description, func(t *testing.T) {
			ctx := context.Background()
			fakeClient := fake.NewClientset(test.pods...)
			fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
				return true, nil, test.wantErr
			})
			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
			sharedInformerFactory.Start(ctx.Done())
			sharedInformerFactory.WaitForCacheSync(ctx.Done())

			eventRecorder := &events.FakeRecorder{}
			podEvictor, err := NewPodEvictor(
				ctx,
				fakeClient,
				eventRecorder,
				sharedInformerFactory.Core().V1().Pods().Informer(),
				initFeatureGates(),
				NewOptions(),
			)
			if err != nil {
				t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
			}

			_, got := podEvictor.evictPod(ctx, test.evictedPod)
			if got != test.wantErr {
				t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.evictedPod.Name, test.wantErr, got)
			}
		})
		got := evictPod(ctx, fakeClient, test.pod, "v1")
		if got != test.want {
			t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
		}
	}
}

@@ -113,3 +166,319 @@ func TestPodTypes(t *testing.T) {
		t.Errorf("Expected p1 to be a normal pod.")
	}
}

func TestNewPodEvictor(t *testing.T) {
	ctx := context.Background()

	pod1 := test.BuildTestPod("pod", 400, 0, "node", nil)
	type podEvictorTest struct {
		description                      string
		pod                              *v1.Pod
		dryRun                           bool
		evictionFailureEventNotification *bool
		maxPodsToEvictTotal              *uint
		maxPodsToEvictPerNode            *uint
		maxPodsToEvictPerNamespace       *uint
		expectedNodeEvictions            uint
		expectedTotalEvictions           uint
		expectedError                    error
		// events is a slice of strings representing expected events.
		// Each string in the slice should follow the format: "EventType Reason Message".
		// e.g. "Warning Failed processing failed"
		events []string
	}
	tests := []podEvictorTest{
		{
			description:                      "one eviction expected with eviction failure event notification",
			pod:                              pod1,
			evictionFailureEventNotification: utilptr.To[bool](true),
			maxPodsToEvictTotal:              utilptr.To[uint](1),
			maxPodsToEvictPerNode:            utilptr.To[uint](1),
			maxPodsToEvictPerNamespace:       utilptr.To[uint](1),
			expectedNodeEvictions:            1,
			expectedTotalEvictions:           1,
			expectedError:                    nil,
			events:                           []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
		},
		{
			description:                      "eviction limit exceeded on total with eviction failure event notification",
			pod:                              pod1,
			evictionFailureEventNotification: utilptr.To[bool](true),
			maxPodsToEvictTotal:              utilptr.To[uint](0),
			maxPodsToEvictPerNode:            utilptr.To[uint](1),
			maxPodsToEvictPerNamespace:       utilptr.To[uint](1),
			expectedNodeEvictions:            0,
			expectedTotalEvictions:           0,
			expectedError:                    NewEvictionTotalLimitError(),
			events:                           []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (0)"},
		},
		{
			description:                      "eviction limit exceeded on node with eviction failure event notification",
			pod:                              pod1,
			evictionFailureEventNotification: utilptr.To[bool](true),
			maxPodsToEvictTotal:              utilptr.To[uint](1),
			maxPodsToEvictPerNode:            utilptr.To[uint](0),
			maxPodsToEvictPerNamespace:       utilptr.To[uint](1),
			expectedNodeEvictions:            0,
			expectedTotalEvictions:           0,
			expectedError:                    NewEvictionNodeLimitError("node"),
			events:                           []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (0)"},
		},
		{
			description:                      "eviction limit exceeded on namespace with eviction failure event notification",
			pod:                              pod1,
			evictionFailureEventNotification: utilptr.To[bool](true),
			maxPodsToEvictTotal:              utilptr.To[uint](1),
			maxPodsToEvictPerNode:            utilptr.To[uint](1),
			maxPodsToEvictPerNamespace:       utilptr.To[uint](0),
			expectedNodeEvictions:            0,
			expectedTotalEvictions:           0,
			expectedError:                    NewEvictionNamespaceLimitError("default"),
			events:                           []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (0)"},
		},
		{
			description:                      "eviction error with eviction failure event notification",
			pod:                              pod1,
			evictionFailureEventNotification: utilptr.To[bool](true),
			maxPodsToEvictTotal:              utilptr.To[uint](1),
			maxPodsToEvictPerNode:            utilptr.To[uint](1),
			maxPodsToEvictPerNamespace:       utilptr.To[uint](1),
			expectedNodeEvictions:            0,
			expectedTotalEvictions:           0,
			expectedError:                    fmt.Errorf("eviction error"),
			events:                           []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: eviction error"},
		},
		{
			description:                      "eviction with dryRun with eviction failure event notification",
			pod:                              pod1,
			dryRun:                           true,
			evictionFailureEventNotification: utilptr.To[bool](true),
			maxPodsToEvictTotal:              utilptr.To[uint](1),
			maxPodsToEvictPerNode:            utilptr.To[uint](1),
			maxPodsToEvictPerNamespace:       utilptr.To[uint](1),
			expectedNodeEvictions:            1,
			expectedTotalEvictions:           1,
			expectedError:                    nil,
		},
		{
			description:                "one eviction expected without eviction failure event notification",
			pod:                        pod1,
			maxPodsToEvictTotal:        utilptr.To[uint](1),
			maxPodsToEvictPerNode:      utilptr.To[uint](1),
			maxPodsToEvictPerNamespace: utilptr.To[uint](1),
			expectedNodeEvictions:      1,
			expectedTotalEvictions:     1,
			expectedError:              nil,
			events:                     []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
		},
		{
			description:                "eviction limit exceeded on total without eviction failure event notification",
			pod:                        pod1,
			maxPodsToEvictTotal:        utilptr.To[uint](0),
			maxPodsToEvictPerNode:      utilptr.To[uint](1),
			maxPodsToEvictPerNamespace: utilptr.To[uint](1),
			expectedNodeEvictions:      0,
			expectedTotalEvictions:     0,
			expectedError:              NewEvictionTotalLimitError(),
		},
		{
			description:                "eviction limit exceeded on node without eviction failure event notification",
			pod:                        pod1,
			maxPodsToEvictTotal:        utilptr.To[uint](1),
			maxPodsToEvictPerNode:      utilptr.To[uint](0),
			maxPodsToEvictPerNamespace: utilptr.To[uint](1),
			expectedNodeEvictions:      0,
			expectedTotalEvictions:     0,
			expectedError:              NewEvictionNodeLimitError("node"),
		},
		{
			description:                "eviction limit exceeded on namespace without eviction failure event notification",
			pod:                        pod1,
			maxPodsToEvictTotal:        utilptr.To[uint](1),
			maxPodsToEvictPerNode:      utilptr.To[uint](1),
			maxPodsToEvictPerNamespace: utilptr.To[uint](0),
			expectedNodeEvictions:      0,
			expectedTotalEvictions:     0,
			expectedError:              NewEvictionNamespaceLimitError("default"),
		},
		{
			description:                "eviction error without eviction failure event notification",
			pod:                        pod1,
			maxPodsToEvictTotal:        utilptr.To[uint](1),
			maxPodsToEvictPerNode:      utilptr.To[uint](1),
			maxPodsToEvictPerNamespace: utilptr.To[uint](1),
			expectedNodeEvictions:      0,
			expectedTotalEvictions:     0,
			expectedError:              fmt.Errorf("eviction error"),
		},
		{
			description:                "eviction with dryRun without eviction failure event notification",
			pod:                        pod1,
			dryRun:                     true,
			maxPodsToEvictTotal:        utilptr.To[uint](1),
			maxPodsToEvictPerNode:      utilptr.To[uint](1),
			maxPodsToEvictPerNamespace: utilptr.To[uint](1),
			expectedNodeEvictions:      1,
			expectedTotalEvictions:     1,
			expectedError:              nil,
		},
	}
    for _, test := range tests {
        t.Run(test.description, func(t *testing.T) {
            fakeClient := fake.NewSimpleClientset(pod1)
            fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
                return true, nil, test.expectedError
            })

            sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
            sharedInformerFactory.Start(ctx.Done())
            sharedInformerFactory.WaitForCacheSync(ctx.Done())

            eventRecorder := events.NewFakeRecorder(100)

            podEvictor, err := NewPodEvictor(
                ctx,
                fakeClient,
                eventRecorder,
                sharedInformerFactory.Core().V1().Pods().Informer(),
                initFeatureGates(),
                NewOptions().
                    WithDryRun(test.dryRun).
                    WithMaxPodsToEvictTotal(test.maxPodsToEvictTotal).
                    WithMaxPodsToEvictPerNode(test.maxPodsToEvictPerNode).
                    WithEvictionFailureEventNotification(test.evictionFailureEventNotification).
                    WithMaxPodsToEvictPerNamespace(test.maxPodsToEvictPerNamespace),
            )
            if err != nil {
                t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
            }

            stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}

            if actualErr := podEvictor.EvictPod(ctx, test.pod, EvictOptions{}); actualErr != nil && actualErr.Error() != test.expectedError.Error() {
                t.Errorf("Expected error: %v, got: %v", test.expectedError, actualErr)
            }

            if evictions := podEvictor.NodeEvicted(stubNode); evictions != test.expectedNodeEvictions {
                t.Errorf("Expected %d node evictions, got %d instead", test.expectedNodeEvictions, evictions)
            }

            if evictions := podEvictor.TotalEvicted(); evictions != test.expectedTotalEvictions {
                t.Errorf("Expected %d total evictions, got %d instead", test.expectedTotalEvictions, evictions)
            }

            // Assert that the events are correct.
            assertEqualEvents(t, test.events, eventRecorder.Events)
        })
    }
}

func TestEvictionRequestsCacheCleanup(t *testing.T) {
    ctx := context.Background()
    node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)

    ownerRef1 := test.GetReplicaSetOwnerRefList()
    updatePod := func(pod *v1.Pod) {
        pod.Namespace = "dev"
        pod.ObjectMeta.OwnerReferences = ownerRef1
    }
    updatePodWithEvictionInBackground := func(pod *v1.Pod) {
        updatePod(pod)
        pod.Annotations = map[string]string{
            EvictionRequestAnnotationKey: "",
        }
    }

    p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
    p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
    p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
    p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)

    client := fakeclientset.NewSimpleClientset(node1, p1, p2, p3, p4)
    sharedInformerFactory := informers.NewSharedInformerFactory(client, 0)
    _, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)

    podEvictor, err := NewPodEvictor(
        ctx,
        client,
        eventRecorder,
        sharedInformerFactory.Core().V1().Pods().Informer(),
        initFeatureGates(),
        nil,
    )
    if err != nil {
        t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
    }

    client.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
        if action.GetSubresource() == "eviction" {
            createAct, matched := action.(core.CreateActionImpl)
            if !matched {
                return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
            }
            if eviction, matched := createAct.Object.(*policy.Eviction); matched {
                podName := eviction.GetName()
                if podName == "p1" || podName == "p2" {
                    return true, nil, &apierrors.StatusError{
                        ErrStatus: metav1.Status{
                            Reason:  metav1.StatusReasonTooManyRequests,
                            Message: "Eviction triggered evacuation",
                        },
                    }
                }
                return true, nil, nil
            }
        }
        return false, nil, nil
    })

    sharedInformerFactory.Start(ctx.Done())
    sharedInformerFactory.WaitForCacheSync(ctx.Done())

    podEvictor.EvictPod(ctx, p1, EvictOptions{})
    podEvictor.EvictPod(ctx, p2, EvictOptions{})
    podEvictor.EvictPod(ctx, p3, EvictOptions{})
    podEvictor.EvictPod(ctx, p4, EvictOptions{})

    klog.Infof("2 evictions in background expected, 2 normal evictions")
    if total := podEvictor.TotalEvictionRequests(); total != 2 {
        t.Fatalf("Expected %v total eviction requests, got %v instead", 2, total)
    }
    if total := podEvictor.TotalEvicted(); total != 2 {
        t.Fatalf("Expected %v total evictions, got %v instead", 2, total)
    }

    klog.Infof("2 evictions in background assumed. Wait a few seconds and check that the assumed requests timed out")
    time.Sleep(2 * time.Second)
    klog.Infof("Checking that the assumed requests timed out and were deleted")
    // Set the timeout to 1s so the cleanup can be tested
    podEvictor.erCache.assumedRequestTimeoutSeconds = 1
    podEvictor.erCache.cleanCache(ctx)
    if totalERs := podEvictor.TotalEvictionRequests(); totalERs > 0 {
        t.Fatalf("Expected 0 eviction requests, got %v instead", totalERs)
    }
}

func assertEqualEvents(t *testing.T, expected []string, actual <-chan string) {
    t.Logf("Assert for events: %v", expected)
    c := time.After(wait.ForeverTestTimeout)
    for _, e := range expected {
        select {
        case a := <-actual:
            if !reflect.DeepEqual(a, e) {
                t.Errorf("Expected event %q, got %q instead", e, a)
            }
        case <-c:
            t.Errorf("Expected event %q, got nothing", e)
            // continue iterating to print all expected events
        }
    }
    for {
        select {
        case a := <-actual:
            t.Errorf("Unexpected event: %q", a)
        default:
            return // No more events, as expected.
        }
    }
}

pkg/descheduler/evictions/options.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package evictions

import (
    policy "k8s.io/api/policy/v1"
)

type Options struct {
    policyGroupVersion               string
    dryRun                           bool
    maxPodsToEvictPerNode            *uint
    maxPodsToEvictPerNamespace       *uint
    maxPodsToEvictTotal              *uint
    evictionFailureEventNotification bool
    metricsEnabled                   bool
}

// NewOptions returns an Options with default values.
func NewOptions() *Options {
    return &Options{
        policyGroupVersion: policy.SchemeGroupVersion.String(),
    }
}

func (o *Options) WithPolicyGroupVersion(policyGroupVersion string) *Options {
    o.policyGroupVersion = policyGroupVersion
    return o
}

func (o *Options) WithDryRun(dryRun bool) *Options {
    o.dryRun = dryRun
    return o
}

func (o *Options) WithMaxPodsToEvictPerNode(maxPodsToEvictPerNode *uint) *Options {
    o.maxPodsToEvictPerNode = maxPodsToEvictPerNode
    return o
}

func (o *Options) WithMaxPodsToEvictPerNamespace(maxPodsToEvictPerNamespace *uint) *Options {
    o.maxPodsToEvictPerNamespace = maxPodsToEvictPerNamespace
    return o
}

func (o *Options) WithMaxPodsToEvictTotal(maxPodsToEvictTotal *uint) *Options {
    o.maxPodsToEvictTotal = maxPodsToEvictTotal
    return o
}

func (o *Options) WithMetricsEnabled(metricsEnabled bool) *Options {
    o.metricsEnabled = metricsEnabled
    return o
}

func (o *Options) WithEvictionFailureEventNotification(evictionFailureEventNotification *bool) *Options {
    if evictionFailureEventNotification != nil {
        o.evictionFailureEventNotification = *evictionFailureEventNotification
    }
    return o
}
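A brief usage note (not part of the diff): the With* methods form a chainable builder, and WithEvictionFailureEventNotification deliberately takes a *bool so that an unset policy field leaves the default in place. A minimal sketch, with a made-up limit value:

    limit := uint(4) // hypothetical cap on total evictions
    opts := NewOptions().
        WithDryRun(true).
        WithMaxPodsToEvictTotal(&limit).
        WithEvictionFailureEventNotification(nil) // nil keeps the default (false)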
pkg/descheduler/metricscollector/metricscollector.go (new file, 151 lines)
@@ -0,0 +1,151 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metricscollector

import (
    "context"
    "fmt"
    "math"
    "sync"
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/wait"
    listercorev1 "k8s.io/client-go/listers/core/v1"
    "k8s.io/klog/v2"
    metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
    utilptr "k8s.io/utils/ptr"
)

const (
    beta float64 = 0.9
)

type MetricsCollector struct {
    nodeLister       listercorev1.NodeLister
    metricsClientset metricsclient.Interface
    nodeSelector     labels.Selector

    nodes map[string]map[v1.ResourceName]*resource.Quantity

    mu sync.RWMutex
    // hasSynced signals at least one sync succeeded
    hasSynced bool
}

func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset metricsclient.Interface, nodeSelector labels.Selector) *MetricsCollector {
    return &MetricsCollector{
        nodeLister:       nodeLister,
        metricsClientset: metricsClientset,
        nodeSelector:     nodeSelector,
        nodes:            make(map[string]map[v1.ResourceName]*resource.Quantity),
    }
}

func (mc *MetricsCollector) Run(ctx context.Context) {
    wait.NonSlidingUntil(func() {
        mc.Collect(ctx)
    }, 5*time.Second, ctx.Done())
}

// During experiments, rounding to int causes weightedAverage to never reach
// the target value exactly, even when weightedAverage is repeated many times
// in a row; the difference between the target and the computed average
// settles within 5 units. Nevertheless, the value is expected to change over
// time, so the weighted average never gets a chance to fully converge, which
// makes the computed error negligible.
// The speed of convergence depends on how often the metrics collector syncs
// the current value. Currently, the interval is set to 5s.
func weightedAverage(prevValue, value int64) int64 {
    return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
}

func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*resource.Quantity, error) {
    mc.mu.RLock()
    defer mc.mu.RUnlock()

    allNodesUsage := make(map[string]map[v1.ResourceName]*resource.Quantity)
    for nodeName := range mc.nodes {
        allNodesUsage[nodeName] = map[v1.ResourceName]*resource.Quantity{
            v1.ResourceCPU:    utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
            v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
        }
    }

    return allNodesUsage, nil
}

func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resource.Quantity, error) {
    mc.mu.RLock()
    defer mc.mu.RUnlock()

    if _, exists := mc.nodes[node.Name]; !exists {
        klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node))
        return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
    }
    return map[v1.ResourceName]*resource.Quantity{
        v1.ResourceCPU:    utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
        v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
    }, nil
}

func (mc *MetricsCollector) HasSynced() bool {
    return mc.hasSynced
}

func (mc *MetricsCollector) MetricsClient() metricsclient.Interface {
    return mc.metricsClientset
}

func (mc *MetricsCollector) Collect(ctx context.Context) error {
    mc.mu.Lock()
    defer mc.mu.Unlock()
    nodes, err := mc.nodeLister.List(mc.nodeSelector)
    if err != nil {
        return fmt.Errorf("unable to list nodes: %v", err)
    }

    for _, node := range nodes {
        metrics, err := mc.metricsClientset.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{})
        if err != nil {
            klog.ErrorS(err, "Error fetching metrics", "node", node.Name)
            // No entry -> duplicate the previous value -> do nothing as beta*PV + (1-beta)*PV = PV
            continue
        }

        if _, exists := mc.nodes[node.Name]; !exists {
            mc.nodes[node.Name] = map[v1.ResourceName]*resource.Quantity{
                v1.ResourceCPU:    utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
                v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
            }
        } else {
            // get MilliValue to reduce loss of precision
            mc.nodes[node.Name][v1.ResourceCPU].SetMilli(
                weightedAverage(mc.nodes[node.Name][v1.ResourceCPU].MilliValue(), metrics.Usage.Cpu().MilliValue()),
            )
            mc.nodes[node.Name][v1.ResourceMemory].Set(
                weightedAverage(mc.nodes[node.Name][v1.ResourceMemory].Value(), metrics.Usage.Memory().Value()),
            )
        }
    }

    mc.hasSynced = true
    return nil
}
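The convergence behavior described in the comment above is easy to reproduce in isolation. The following standalone sketch (not part of the diff) repeatedly applies weightedAverage to a fixed sample; with beta = 0.9 the stored value decays toward the sample by 10% per step and then stalls about 5 units away because of the integer rounding:

    package main

    import (
        "fmt"
        "math"
    )

    const beta = 0.9

    func weightedAverage(prevValue, value int64) int64 {
        return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
    }

    func main() {
        avg, sample := int64(1400), int64(900) // millicores, mirroring the test below
        for i := 1; i <= 60; i++ {
            avg = weightedAverage(avg, sample)
        }
        // Prints 905: rounding keeps the average 5 units from the sample,
        // matching the tolerance checked in TestMetricsCollectorConvergence.
        fmt.Println(avg)
    }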
pkg/descheduler/metricscollector/metricscollector_test.go (new file, 141 lines)
@@ -0,0 +1,141 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metricscollector

import (
    "context"
    "math"
    "testing"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/informers"
    fakeclientset "k8s.io/client-go/kubernetes/fake"
    fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"

    "sigs.k8s.io/descheduler/test"
)

func checkCpuNodeUsage(t *testing.T, usage map[v1.ResourceName]*resource.Quantity, millicpu int64) {
    t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
    if usage[v1.ResourceCPU].MilliValue() != millicpu {
        t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())
    }
}

func TestMetricsCollector(t *testing.T) {
    gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}

    n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
    n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
    n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)

    n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
    n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
    n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)

    clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
    metricsClientset := fakemetricsclient.NewSimpleClientset()
    metricsClientset.Tracker().Create(gvr, n1metrics, "")
    metricsClientset.Tracker().Create(gvr, n2metrics, "")
    metricsClientset.Tracker().Create(gvr, n3metrics, "")

    ctx := context.TODO()
    sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
    nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
    sharedInformerFactory.Start(ctx.Done())
    sharedInformerFactory.WaitForCacheSync(ctx.Done())

    t.Logf("Set initial node cpu usage to 1400")
    collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
    collector.Collect(context.TODO())
    nodesUsage, _ := collector.NodeUsage(n2)
    checkCpuNodeUsage(t, nodesUsage, 1400)
    allnodesUsage, _ := collector.AllNodesUsage()
    checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)

    t.Logf("Set current node cpu usage to 500")
    n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(500, resource.DecimalSI)
    metricsClientset.Tracker().Update(gvr, n2metrics, "")
    collector.Collect(context.TODO())
    nodesUsage, _ = collector.NodeUsage(n2)
    checkCpuNodeUsage(t, nodesUsage, 1310)
    allnodesUsage, _ = collector.AllNodesUsage()
    checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1310)

    t.Logf("Set current node cpu usage to 900")
    n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
    metricsClientset.Tracker().Update(gvr, n2metrics, "")
    collector.Collect(context.TODO())
    nodesUsage, _ = collector.NodeUsage(n2)
    checkCpuNodeUsage(t, nodesUsage, 1269)
    allnodesUsage, _ = collector.AllNodesUsage()
    checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1269)
}

func TestMetricsCollectorConvergence(t *testing.T) {
    gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}

    n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
    n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
    n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)

    n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
    n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
    n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)

    clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
    metricsClientset := fakemetricsclient.NewSimpleClientset()
    metricsClientset.Tracker().Create(gvr, n1metrics, "")
    metricsClientset.Tracker().Create(gvr, n2metrics, "")
    metricsClientset.Tracker().Create(gvr, n3metrics, "")

    ctx := context.TODO()
    sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
    nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
    sharedInformerFactory.Start(ctx.Done())
    sharedInformerFactory.WaitForCacheSync(ctx.Done())

    t.Logf("Set initial node cpu usage to 1400")
    collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
    collector.Collect(context.TODO())
    nodesUsage, _ := collector.NodeUsage(n2)
    checkCpuNodeUsage(t, nodesUsage, 1400)
    allnodesUsage, _ := collector.AllNodesUsage()
    checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)

    t.Logf("Set current node cpu/memory usage to 900/1614978816 and wait until it converges to it")
    n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
    n2metrics.Usage[v1.ResourceMemory] = *resource.NewQuantity(1614978816, resource.BinarySI)
    metricsClientset.Tracker().Update(gvr, n2metrics, "")
    converged := false
    for i := 0; i < 300; i++ {
        collector.Collect(context.TODO())
        nodesUsage, _ = collector.NodeUsage(n2)
        if math.Abs(float64(900-nodesUsage[v1.ResourceCPU].MilliValue())) < 6 && math.Abs(float64(1614978816-nodesUsage[v1.ResourceMemory].Value())) < 6 {
            t.Logf("Node cpu/memory usage converged to 900+-5/1614978816+-5")
            converged = true
            break
        }
        t.Logf("The current node usage: cpu=%v, memory=%v", nodesUsage[v1.ResourceCPU].MilliValue(), nodesUsage[v1.ResourceMemory].Value())
    }
    if !converged {
        t.Fatalf("The node usage did not converge to 900+-5/1614978816+-5")
    }
}
@@ -18,20 +18,24 @@ package node

import (
    "context"
    "errors"
    "fmt"
    "sync/atomic"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    clientset "k8s.io/client-go/kubernetes"
    listersv1 "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog/v2"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    "sigs.k8s.io/descheduler/pkg/utils"
)

const workersCount = 100

// ReadyNodes returns ready nodes irrespective of whether they are
// schedulable or not.
func ReadyNodes(ctx context.Context, client clientset.Interface, nodeLister listersv1.NodeLister, nodeSelector string) ([]*v1.Node, error) {
@@ -104,83 +108,96 @@ func IsReady(node *v1.Node) bool {
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
// This function currently considers a subset of the Kubernetes Scheduler's predicates when
// deciding if a pod would fit on a node, but more predicates may be added in the future.
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) []error {
// There should be no methods to modify nodes or pods in this method.
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) error {
    // Check node selector and required affinity
    var errors []error
    if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
        errors = append(errors, err)
        return err
    } else if !ok {
        errors = append(errors, fmt.Errorf("pod node selector does not match the node label"))
        return errors.New("pod node selector does not match the node label")
    }

    // Check taints (we only care about NoSchedule and NoExecute taints)
    ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
        return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
    })
    if !ok {
        errors = append(errors, fmt.Errorf("pod does not tolerate taints on the node"))
    }
    // Check if the pod can fit on a node based on its requests
    if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
        if ok, reqErrors := fitsRequest(nodeIndexer, pod, node); !ok {
            errors = append(errors, reqErrors...)
        }
    }
    // Check if node is schedulable
    if IsNodeUnschedulable(node) {
        errors = append(errors, fmt.Errorf("node is not schedulable"))
        return errors.New("pod does not tolerate taints on the node")
    }

    return errors
    // Check if the pod can fit on a node based on its requests
    if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
        if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
            return reqError
        }
    }

    // Check if node is schedulable
    if IsNodeUnschedulable(node) {
        return errors.New("node is not schedulable")
    }

    // Check if pod matches inter-pod anti-affinity rule of pod on node
    if match, err := podMatchesInterPodAntiAffinity(nodeIndexer, pod, node); err != nil {
        return err
    } else if match {
        return errors.New("pod matches inter-pod anti-affinity rule of other pod on node")
    }

    return nil
}

func podFitsNodes(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node, excludeFilter func(pod *v1.Pod, node *v1.Node) bool) bool {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    var filteredLen int32
    checkNode := func(i int) {
        node := nodes[i]
        if excludeFilter != nil && excludeFilter(pod, node) {
            return
        }
        err := NodeFit(nodeIndexer, pod, node)
        if err == nil {
            klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
            atomic.AddInt32(&filteredLen, 1)
            cancel()
        } else {
            klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node), "err", err.Error())
        }
    }

    // Stops searching for more nodes once a fitting node is found.
    workqueue.ParallelizeUntil(ctx, workersCount, len(nodes), checkNode)

    return filteredLen > 0
}

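podFitsNodes relies on a cancel-on-first-match pattern: all workers share one context, and the first worker that finds a fit cancels it so ParallelizeUntil stops handing out further indices. A generic standalone sketch of the same pattern (the items and matches names are hypothetical; assumes context, sync/atomic, and k8s.io/client-go/util/workqueue):

    func anyMatch(items []string, matches func(string) bool) bool {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        var found int32
        workqueue.ParallelizeUntil(ctx, 100, len(items), func(i int) {
            if matches(items[i]) {
                atomic.AddInt32(&found, 1)
                cancel() // no need to check the remaining items
            }
        })
        return found > 0
    }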
// PodFitsAnyOtherNode checks if the given pod will fit any of the given nodes, besides the node
// the pod is already running on. The predicates used to determine if the pod will fit can be found in the NodeFit function.
func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
    for _, node := range nodes {
        // Skip node pod is already on
        if node.Name == pod.Spec.NodeName {
            continue
        }

        errors := NodeFit(nodeIndexer, pod, node)
        if len(errors) == 0 {
            klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
            return true
        }
        klog.V(4).InfoS("Pod does not fit on node",
            "pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
    }

    return false
    return podFitsNodes(nodeIndexer, pod, nodes, func(pod *v1.Pod, node *v1.Node) bool {
        return pod.Spec.NodeName == node.Name
    })
}

// PodFitsAnyNode checks if the given pod will fit any of the given nodes. The predicates used
// to determine if the pod will fit can be found in the NodeFit function.
func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
    for _, node := range nodes {
        errors := NodeFit(nodeIndexer, pod, node)
        if len(errors) == 0 {
            klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
            return true
        }
        klog.V(4).InfoS("Pod does not fit on node",
            "pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
    }

    return false
    return podFitsNodes(nodeIndexer, pod, nodes, nil)
}

// PodFitsCurrentNode checks if the given pod will fit onto the given node. The predicates used
// to determine if the pod will fit can be found in the NodeFit function.
func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) bool {
    errors := NodeFit(nodeIndexer, pod, node)
    if len(errors) == 0 {
    err := NodeFit(nodeIndexer, pod, node)
    if err == nil {
        klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
        return true
    }

    klog.V(4).InfoS("Pod does not fit on node",
        "pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
    klog.V(4).InfoS("Pod does not fit on current node",
        "pod", klog.KObj(pod), "node", klog.KObj(node), "error", err)

    return false
}
@@ -193,9 +210,7 @@ func IsNodeUnschedulable(node *v1.Node) bool {

// fitsRequest determines if a pod can fit on a node based on its resource requests. It returns true if
// the pod will fit.
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, []error) {
    var insufficientResources []error

func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
    // Get pod requests
    podRequests, _ := utils.PodRequestsAndLimits(pod)
    resourceNames := make([]v1.ResourceName, 0, len(podRequests))
@@ -203,36 +218,41 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
        resourceNames = append(resourceNames, name)
    }

    availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames)
    availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames,
        func(pod *v1.Pod) (v1.ResourceList, error) {
            req, _ := utils.PodRequestsAndLimits(pod)
            return req, nil
        },
    )
    if err != nil {
        return false, []error{err}
        return false, err
    }

    podFitsOnNode := true
    for _, resource := range resourceNames {
        podResourceRequest := podRequests[resource]
        availableResource, ok := availableResources[resource]
        if !ok || podResourceRequest.MilliValue() > availableResource.MilliValue() {
            insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", resource))
            podFitsOnNode = false
            return false, fmt.Errorf("insufficient %v", resource)
        }
    }
    // check pod count; at least one pod slot must be available
    if availableResources[v1.ResourcePods].MilliValue() <= 0 {
        insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", v1.ResourcePods))
        podFitsOnNode = false
        return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
    }

    return podFitsOnNode, insufficientResources
    return true, nil
}

// nodeAvailableResources returns resources mapped to the quantity available on the node.
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName) (map[v1.ResourceName]*resource.Quantity, error) {
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
    podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
    if err != nil {
        return nil, err
    }
    nodeUtilization := NodeUtilization(podsOnNode, resourceNames)
    nodeUtilization, err := NodeUtilization(podsOnNode, resourceNames, podUtilization)
    if err != nil {
        return nil, err
    }
    remainingResources := map[v1.ResourceName]*resource.Quantity{
        v1.ResourceCPU:    resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
        v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
@@ -253,31 +273,34 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
}

// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
    totalReqs := map[v1.ResourceName]*resource.Quantity{
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
    totalUtilization := map[v1.ResourceName]*resource.Quantity{
        v1.ResourceCPU:    resource.NewMilliQuantity(0, resource.DecimalSI),
        v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
        v1.ResourcePods:   resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
    }
    for _, name := range resourceNames {
        if !IsBasicResource(name) {
            totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
            totalUtilization[name] = resource.NewQuantity(0, resource.DecimalSI)
        }
    }

    for _, pod := range pods {
        req, _ := utils.PodRequestsAndLimits(pod)
        podUtil, err := podUtilization(pod)
        if err != nil {
            return nil, err
        }
        for _, name := range resourceNames {
            quantity, ok := req[name]
            quantity, ok := podUtil[name]
            if ok && name != v1.ResourcePods {
                // As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
                // the format of the quantity will be updated to the format of y.
                totalReqs[name].Add(quantity)
                totalUtilization[name].Add(quantity)
            }
        }
    }

    return totalReqs
    return totalUtilization, nil
}

// IsBasicResource checks if resource is basic native.
@@ -323,3 +346,43 @@ func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) bool {
    }
    return matches
}

// podMatchesInterPodAntiAffinity checks if the pod matches the anti-affinity rule
// of another pod that is already on the given node.
// If a match is found, it returns true.
func podMatchesInterPodAntiAffinity(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
    if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil {
        return false, nil
    }

    podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
    if err != nil {
        return false, fmt.Errorf("error listing all pods: %v", err)
    }
    assignedPodsInNamespace := podutil.GroupByNamespace(podsOnNode)

    for _, term := range utils.GetPodAntiAffinityTerms(pod.Spec.Affinity.PodAntiAffinity) {
        namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, &term)
        selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
        if err != nil {
            klog.ErrorS(err, "Unable to convert LabelSelector into Selector")
            return false, err
        }

        for namespace := range namespaces {
            for _, assignedPod := range assignedPodsInNamespace[namespace] {
                if assignedPod.Name == pod.Name || !utils.PodMatchesTermsNamespaceAndSelector(assignedPod, namespaces, selector) {
                    klog.V(4).InfoS("Pod doesn't match inter-pod anti-affinity rule of assigned pod on node", "candidatePod", klog.KObj(pod), "assignedPod", klog.KObj(assignedPod))
                    continue
                }

                if _, ok := node.Labels[term.TopologyKey]; ok {
                    klog.V(1).InfoS("Pod matches inter-pod anti-affinity rule of assigned pod on node", "candidatePod", klog.KObj(pod), "assignedPod", klog.KObj(assignedPod))
                    return true, nil
                }
            }
        }
    }

    return false, nil
}

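To make the check above concrete, here is a hypothetical pod (not part of the diff) that podMatchesInterPodAntiAffinity would report as a match when a pod labeled app=web already runs on a node carrying the kubernetes.io/hostname label; the label values and topology key are illustrative only:

    candidate := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"},
        Spec: v1.PodSpec{
            Affinity: &v1.Affinity{
                PodAntiAffinity: &v1.PodAntiAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
                        LabelSelector: &metav1.LabelSelector{
                            MatchLabels: map[string]string{"app": "web"},
                        },
                        TopologyKey: "kubernetes.io/hostname",
                    }},
                },
            },
        },
    }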
@@ -19,6 +19,7 @@ package node
import (
    "context"
    "errors"
    "sync"
    "testing"

    v1 "k8s.io/api/core/v1"
@@ -230,7 +231,7 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
    nodeTaintValue := "gpu"

    // Staging node has no scheduling restrictions, but the pod always starts here and PodFitsAnyOtherNode() doesn't take into account the node the pod is running on.
    nodeNames := []string{"node1", "node2", "stagingNode"}
    nodeNames := []string{"node1", "node2", "stagingNode", "node4"}

    tests := []struct {
        description string
@@ -716,6 +717,151 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
            },
            success: false,
        },
        {
            description: "There are four nodes. One node has a taint, and the other three nodes do not meet the resource requirements, should fail",
            pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
                pod.Spec.NodeSelector = map[string]string{
                    nodeLabelKey: nodeLabelValue,
                }
                pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
            }),
            nodes: []*v1.Node{
                test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
                    node.ObjectMeta.Labels = map[string]string{
                        nodeLabelKey: nodeLabelValue,
                    }

                    node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
                    node.Spec.Taints = []v1.Taint{
                        {
                            Key:    nodeTaintKey,
                            Value:  nodeTaintValue,
                            Effect: v1.TaintEffectNoSchedule,
                        },
                    }
                }),
                test.BuildTestNode(nodeNames[1], 3000, 8*1000*1000*1000, 12, func(node *v1.Node) {
                    node.ObjectMeta.Labels = map[string]string{
                        nodeLabelKey: nodeLabelValue,
                    }

                    node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
                }),
                test.BuildTestNode(nodeNames[2], 3000, 8*1000*1000*1000, 12, nil),
                test.BuildTestNode(nodeNames[3], 0, 0, 0, nil),
            },
            podsOnNodes: []*v1.Pod{
                test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
                    pod.ObjectMeta = metav1.ObjectMeta{
                        Namespace: "test",
                        Labels: map[string]string{
                            "test": "true",
                        },
                    }
                    pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
                    pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
                }),
            },
            success: false,
        },
        {
            description: "There are four nodes. The first node has a taint, the second node has no label, the third node does not meet the resource requirements, and only the fourth node meets the requirements, should succeed",
            pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
                pod.Spec.NodeSelector = map[string]string{
                    nodeLabelKey: nodeLabelValue,
                }
                pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
            }),
            nodes: []*v1.Node{
                test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
                    node.ObjectMeta.Labels = map[string]string{
                        nodeLabelKey: nodeLabelValue,
                    }

                    node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
                    node.Spec.Taints = []v1.Taint{
                        {
                            Key:    nodeTaintKey,
                            Value:  nodeTaintValue,
                            Effect: v1.TaintEffectNoSchedule,
                        },
                    }
                }),
                test.BuildTestNode(nodeNames[1], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
                    node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
                }),
                test.BuildTestNode(nodeNames[2], 1000, 8*1000*1000*1000, 12, nil),
                test.BuildTestNode(nodeNames[3], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
                    node.ObjectMeta.Labels = map[string]string{
                        nodeLabelKey: nodeLabelValue,
                    }

                    node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
                }),
            },
            podsOnNodes: []*v1.Pod{
                test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
                    pod.ObjectMeta = metav1.ObjectMeta{
                        Namespace: "test",
                        Labels: map[string]string{
                            "test": "true",
                        },
                    }
                    pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
                    pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
                }),
            },
            success: true,
        },
        {
            description: "There are four nodes. The first node has a taint, the second node has no label, the third node does not meet the resource requirements, and the fourth node is the one where the pod is located, should fail",
            pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[3], func(pod *v1.Pod) {
                pod.Spec.NodeSelector = map[string]string{
                    nodeLabelKey: nodeLabelValue,
                }
                pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
            }),
            nodes: []*v1.Node{
                test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
                    node.ObjectMeta.Labels = map[string]string{
                        nodeLabelKey: nodeLabelValue,
                    }

                    node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
                    node.Spec.Taints = []v1.Taint{
                        {
                            Key:    nodeTaintKey,
                            Value:  nodeTaintValue,
                            Effect: v1.TaintEffectNoSchedule,
                        },
                    }
                }),
                test.BuildTestNode(nodeNames[1], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
                    node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
                }),
                test.BuildTestNode(nodeNames[2], 1000, 8*1000*1000*1000, 12, nil),
                test.BuildTestNode(nodeNames[3], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
                    node.ObjectMeta.Labels = map[string]string{
                        nodeLabelKey: nodeLabelValue,
                    }

                    node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
                }),
            },
            podsOnNodes: []*v1.Pod{
                test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
                    pod.ObjectMeta = metav1.ObjectMeta{
                        Namespace: "test",
                        Labels: map[string]string{
                            "test": "true",
                        },
                    }
                    pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
                    pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
                }),
            },
            success: false,
        },
    }

    for _, tc := range tests {
@@ -753,8 +899,60 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
    }
}

func TestPodFitsNodes(t *testing.T) {
    nodeNames := []string{"node1", "node2", "node3", "node4"}
    pod := test.BuildTestPod("p1", 950, 2*1000*1000*1000, nodeNames[0], nil)
    nodes := []*v1.Node{
        test.BuildTestNode(nodeNames[0], 1000, 8*1000*1000*1000, 12, nil),
        test.BuildTestNode(nodeNames[1], 200, 8*1000*1000*1000, 12, nil),
        test.BuildTestNode(nodeNames[2], 300, 8*1000*1000*1000, 12, nil),
        test.BuildTestNode(nodeNames[3], 400, 8*1000*1000*1000, 12, nil),
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    var objs []runtime.Object
    for _, node := range nodes {
        objs = append(objs, node)
    }
    objs = append(objs, pod)

    fakeClient := fake.NewSimpleClientset(objs...)

    sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
    podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

    getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
    if err != nil {
        t.Errorf("Build get pods assigned to node function error: %v", err)
    }

    sharedInformerFactory.Start(ctx.Done())
    sharedInformerFactory.WaitForCacheSync(ctx.Done())

    var nodesTraversed sync.Map
    podFitsNodes(getPodsAssignedToNode, pod, nodes, func(pod *v1.Pod, node *v1.Node) bool {
        nodesTraversed.Store(node.Name, node)
        return true
    })

    for _, node := range nodes {
        if _, exists := nodesTraversed.Load(node.Name); !exists {
            t.Errorf("Node %v was not processed", node.Name)
        }
    }
}

func TestNodeFit(t *testing.T) {
    node := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, nil)
    node := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, func(node *v1.Node) {
        node.ObjectMeta.Labels = map[string]string{
            "region": "main-region",
        }
    })

    nodeNolabel := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, nil)

    tests := []struct {
        description string
        pod         *v1.Pod
@@ -763,7 +961,7 @@ func TestNodeFit(t *testing.T) {
        err error
    }{
        {
            description: "insufficient cpu",
            description: "Insufficient cpu",
            pod:         test.BuildTestPod("p1", 10000, 2*1000*1000*1000, "", nil),
            node:        node,
            podsOnNode: []*v1.Pod{
@@ -772,7 +970,7 @@ func TestNodeFit(t *testing.T) {
            err: errors.New("insufficient cpu"),
        },
        {
            description: "insufficient pod num",
            description: "Insufficient pod num",
            pod:         test.BuildTestPod("p1", 1000, 2*1000*1000*1000, "", nil),
            node:        node,
            podsOnNode: []*v1.Pod{
@@ -781,6 +979,47 @@ func TestNodeFit(t *testing.T) {
            },
            err: errors.New("insufficient pods"),
        },
        {
            description: "Pod matches inter-pod anti-affinity rule of other pod on node",
            pod:         test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
            node:        node,
            podsOnNode: []*v1.Pod{
                test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo", "bar"),
            },
            err: errors.New("pod matches inter-pod anti-affinity rule of other pod on node"),
        },
        {
            description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because the pods are not in the same namespace",
            pod:         test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
            node:        node,
            podsOnNode: []*v1.Pod{
                test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, func(pod *v1.Pod) {
                    pod.Namespace = "test"
                }), "foo", "bar"),
            },
        },
        {
            description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because the other pod does not match the pod's labels",
            pod:         test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
            node:        node,
            podsOnNode: []*v1.Pod{
                test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo1", "bar1"),
            },
        },
        {
            description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because the node has no topologyKey label",
            pod:         test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, "node1", nil), "foo", "bar"),
            node:        nodeNolabel,
            podsOnNode: []*v1.Pod{
                test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo", "bar"),
            },
        },
        {
            description: "Pod fits on node",
            pod:         test.BuildTestPod("p1", 1000, 1000, "", func(pod *v1.Pod) {}),
            node:        node,
            podsOnNode:  []*v1.Pod{},
        },
    }

    for _, tc := range tests {
@@ -804,8 +1043,9 @@ func TestNodeFit(t *testing.T) {

    sharedInformerFactory.Start(ctx.Done())
    sharedInformerFactory.WaitForCacheSync(ctx.Done())
    if errs := NodeFit(getPodsAssignedToNode, tc.pod, tc.node); (len(errs) == 0 && tc.err != nil) || errs[0].Error() != tc.err.Error() {
        t.Errorf("Test %#v failed, got %v, expect %v", tc.description, errs, tc.err)
    err = NodeFit(getPodsAssignedToNode, tc.pod, tc.node)
    if (err == nil && tc.err != nil) || (err != nil && err.Error() != tc.err.Error()) {
        t.Errorf("Test %#v failed, got %v, expect %v", tc.description, err, tc.err)
    }
    })
}

@@ -39,6 +39,9 @@ type FilterFunc func(*v1.Pod) bool
// as input and returns the pods that are assigned to the node.
type GetPodsAssignedToNodeFunc func(string, FilterFunc) ([]*v1.Pod, error)

// PodUtilizationFnc is a function for getting a pod's utilization, e.g. requested resources or utilization from metrics.
type PodUtilizationFnc func(pod *v1.Pod) (v1.ResourceList, error)

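A short illustration (not part of the diff): a PodUtilizationFnc decides what "utilization" means for a pod. The requests-based variant below mirrors the closure that fitsRequest in pkg/descheduler/node passes to nodeAvailableResources; a metrics-backed implementation would instead consult the metrics collector.

    // Requests-based utilization: report the pod's resource requests.
    var requestsUtilization PodUtilizationFnc = func(pod *v1.Pod) (v1.ResourceList, error) {
        req, _ := utils.PodRequestsAndLimits(pod)
        return req, nil
    }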
// WrapFilterFuncs wraps a set of FilterFunc in one.
func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
    return func(pod *v1.Pod) bool {
@@ -99,9 +102,6 @@ func (o *Options) BuildFilterFunc() (FilterFunc, error) {
        }
    }
    return func(pod *v1.Pod) bool {
        if o.filter != nil && !o.filter(pod) {
            return false
        }
        if len(o.includedNamespaces) > 0 && !o.includedNamespaces.Has(pod.Namespace) {
            return false
        }
@@ -111,6 +111,9 @@ func (o *Options) BuildFilterFunc() (FilterFunc, error) {
        if s != nil && !s.Matches(labels.Set(pod.GetLabels())) {
            return false
        }
        if o.filter != nil && !o.filter(pod) {
            return false
        }
        return true
    }, nil
}
@@ -142,21 +145,25 @@ func BuildGetPodsAssignedToNodeFunc(podInformer cache.SharedIndexInformer) (GetP
        if err != nil {
            return nil, err
        }
        pods := make([]*v1.Pod, 0, len(objs))
        for _, obj := range objs {
            pod, ok := obj.(*v1.Pod)
            if !ok {
                continue
            }
            if filter(pod) {
                pods = append(pods, pod)
            }
        }
        return pods, nil
        return ConvertToPods(objs, filter), nil
    }
    return getPodsAssignedToNode, nil
}

func ConvertToPods(objs []interface{}, filter FilterFunc) []*v1.Pod {
    pods := make([]*v1.Pod, 0, len(objs))
    for _, obj := range objs {
        pod, ok := obj.(*v1.Pod)
        if !ok {
            continue
        }
        if filter == nil || filter(pod) {
            pods = append(pods, pod)
        }
    }
    return pods
}

// ListPodsOnNodes returns all pods on given nodes.
func ListPodsOnNodes(nodes []*v1.Node, getPodsAssignedToNode GetPodsAssignedToNodeFunc, filter FilterFunc) ([]*v1.Pod, error) {
    pods := make([]*v1.Pod, 0)
@@ -215,6 +222,14 @@ func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
    return pod.ObjectMeta.GetOwnerReferences()
}

func OwnerRefUIDs(pod *v1.Pod) []string {
    var ownerRefUIDs []string
    for _, ownerRef := range OwnerRef(pod) {
        ownerRefUIDs = append(ownerRefUIDs, string(ownerRef.UID))
    }
    return ownerRefUIDs
}

func IsBestEffortPod(pod *v1.Pod) bool {
    return utils.GetPodQOS(pod) == v1.PodQOSBestEffort
}
@@ -257,3 +272,12 @@ func SortPodsBasedOnAge(pods []*v1.Pod) {
        return pods[i].CreationTimestamp.Before(&pods[j].CreationTimestamp)
    })
}

func GroupByNodeName(pods []*v1.Pod) map[string][]*v1.Pod {
    m := make(map[string][]*v1.Pod)
    for i := 0; i < len(pods); i++ {
        pod := pods[i]
        m[pod.Spec.NodeName] = append(m[pod.Spec.NodeName], pod)
    }
    return m
}

@@ -176,3 +176,67 @@ func TestSortPodsBasedOnAge(t *testing.T) {
    }
}

func TestGroupByNodeName(t *testing.T) {
    tests := []struct {
        name   string
        pods   []*v1.Pod
        expMap map[string][]*v1.Pod
    }{
        {
            name:   "list of pods is empty",
            pods:   []*v1.Pod{},
            expMap: map[string][]*v1.Pod{},
        },
        {
            name: "pods are on same node",
            pods: []*v1.Pod{
                {Spec: v1.PodSpec{
                    NodeName: "node1",
                }},
                {Spec: v1.PodSpec{
                    NodeName: "node1",
                }},
            },
            expMap: map[string][]*v1.Pod{"node1": {
                {Spec: v1.PodSpec{
                    NodeName: "node1",
                }},
                {Spec: v1.PodSpec{
                    NodeName: "node1",
                }},
            }},
        },
        {
            name: "pods are on different nodes",
            pods: []*v1.Pod{
                {Spec: v1.PodSpec{
                    NodeName: "node1",
                }},
                {Spec: v1.PodSpec{
                    NodeName: "node2",
                }},
            },
            expMap: map[string][]*v1.Pod{
                "node1": {
                    {Spec: v1.PodSpec{
                        NodeName: "node1",
                    }},
                },
                "node2": {
                    {Spec: v1.PodSpec{
                        NodeName: "node2",
                    }},
                },
            },
        },
    }
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            resultMap := GroupByNodeName(test.pods)
            if !reflect.DeepEqual(resultMap, test.expMap) {
                t.Errorf("Expected %v node map, got %v", test.expMap, resultMap)
            }
        })
    }
}

@@ -28,7 +28,6 @@ import (
    "k8s.io/klog/v2"

    "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/api/v1alpha1"
    "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
    "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
    "sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
@@ -54,7 +53,7 @@ func decode(policyConfigFile string, policy []byte, client clientset.Interface,
    internalPolicy := &api.DeschedulerPolicy{}
    var err error

    decoder := scheme.Codecs.UniversalDecoder(v1alpha1.SchemeGroupVersion, v1alpha2.SchemeGroupVersion, api.SchemeGroupVersion)
    decoder := scheme.Codecs.UniversalDecoder(v1alpha2.SchemeGroupVersion, api.SchemeGroupVersion)
    if err := runtime.DecodeInto(decoder, policy, internalPolicy); err != nil {
        return nil, fmt.Errorf("failed decoding descheduler's policy config %q: %v", policyConfigFile, err)
    }
@@ -106,6 +105,7 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
        EvictSystemCriticalPods: false,
        IgnorePvcPods:           false,
        EvictFailedBarePods:     false,
        IgnorePodsWithoutPDB:    false,
    },
}

@@ -21,795 +21,37 @@ import (
	"testing"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/conversion"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	utilpointer "k8s.io/utils/pointer"
	utilptr "k8s.io/utils/ptr"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
	"sigs.k8s.io/descheduler/pkg/utils"
)

func TestV1alpha1ToV1alpha2(t *testing.T) {
	SetupPlugins()
	defaultEvictorPluginConfig := api.PluginConfig{
		Name: defaultevictor.PluginName,
		Args: &defaultevictor.DefaultEvictorArgs{
			PriorityThreshold: &api.PriorityThreshold{
				Value: nil,
			},
		},
	}
	type testCase struct {
		description string
		policy      *v1alpha1.DeschedulerPolicy
		err         error
		result      *api.DeschedulerPolicy
	}
	testCases := []testCase{
		{
			description: "RemoveFailedPods enabled, LowNodeUtilization disabled strategies to profile",
			policy: &v1alpha1.DeschedulerPolicy{
				Strategies: v1alpha1.StrategyList{
					removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							Namespaces: &v1alpha1.Namespaces{
								Exclude: []string{
									"test2",
								},
							},
						},
					},
					nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
						Enabled: false,
						Params: &v1alpha1.StrategyParameters{
							NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
								Thresholds: v1alpha1.ResourceThresholds{
									"cpu":    v1alpha1.Percentage(20),
									"memory": v1alpha1.Percentage(20),
									"pods":   v1alpha1.Percentage(20),
								},
								TargetThresholds: v1alpha1.ResourceThresholds{
									"cpu":    v1alpha1.Percentage(50),
									"memory": v1alpha1.Percentage(50),
									"pods":   v1alpha1.Percentage(50),
								},
							},
						},
					},
				},
			},
			result: &api.DeschedulerPolicy{
				Profiles: []api.DeschedulerProfile{
					{
						Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removeduplicates.PluginName,
								Args: &removeduplicates.RemoveDuplicatesArgs{
									Namespaces: &api.Namespaces{
										Exclude: []string{
											"test2",
										},
									},
								},
							},
						},
						Plugins: api.Plugins{
							Balance: api.PluginSet{
								Enabled: []string{removeduplicates.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					// Disabled strategy is not generating internal plugin since it is not being used internally currently
					// {
					// 	Name: nodeutilization.LowNodeUtilizationPluginName,
					// 	PluginConfigs: []api.PluginConfig{
					// 		{
					// 			Name: nodeutilization.LowNodeUtilizationPluginName,
					// 			Args: &nodeutilization.LowNodeUtilizationArgs{
					// 				Thresholds: api.ResourceThresholds{
					// 					"cpu": api.Percentage(20),
					// [...]
					// [...]
					// },
				},
			},
		},
		{
			description: "convert all strategies",
			policy: &v1alpha1.DeschedulerPolicy{
				Strategies: v1alpha1.StrategyList{
					removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params:  &v1alpha1.StrategyParameters{},
					},
					nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
								Thresholds: v1alpha1.ResourceThresholds{
									"cpu":    v1alpha1.Percentage(20),
									"memory": v1alpha1.Percentage(20),
									"pods":   v1alpha1.Percentage(20),
								},
								TargetThresholds: v1alpha1.ResourceThresholds{
									"cpu":    v1alpha1.Percentage(50),
									"memory": v1alpha1.Percentage(50),
									"pods":   v1alpha1.Percentage(50),
								},
							},
						},
					},
					nodeutilization.HighNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
								Thresholds: v1alpha1.ResourceThresholds{
									"cpu":    v1alpha1.Percentage(20),
									"memory": v1alpha1.Percentage(20),
									"pods":   v1alpha1.Percentage(20),
								},
							},
						},
					},
					removefailedpods.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params:  &v1alpha1.StrategyParameters{},
					},
					removepodshavingtoomanyrestarts.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							PodsHavingTooManyRestarts: &v1alpha1.PodsHavingTooManyRestarts{
								PodRestartThreshold: 100,
							},
						},
					},
					removepodsviolatinginterpodantiaffinity.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params:  &v1alpha1.StrategyParameters{},
					},
					removepodsviolatingnodeaffinity.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
						},
					},
					removepodsviolatingnodetaints.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params:  &v1alpha1.StrategyParameters{},
					},
					removepodsviolatingtopologyspreadconstraint.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params:  &v1alpha1.StrategyParameters{},
					},
				},
			},
			result: &api.DeschedulerPolicy{
				Profiles: []api.DeschedulerProfile{
					{
						Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.HighNodeUtilizationPluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: nodeutilization.HighNodeUtilizationPluginName,
								Args: &nodeutilization.HighNodeUtilizationArgs{
									Thresholds: api.ResourceThresholds{
										"cpu":    api.Percentage(20),
										"memory": api.Percentage(20),
										"pods":   api.Percentage(20),
									},
								},
							},
						},
						Plugins: api.Plugins{
							Balance: api.PluginSet{
								Enabled: []string{nodeutilization.HighNodeUtilizationPluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.LowNodeUtilizationPluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: nodeutilization.LowNodeUtilizationPluginName,
								Args: &nodeutilization.LowNodeUtilizationArgs{
									Thresholds: api.ResourceThresholds{
										"cpu":    api.Percentage(20),
										"memory": api.Percentage(20),
										"pods":   api.Percentage(20),
									},
									TargetThresholds: api.ResourceThresholds{
										"cpu":    api.Percentage(50),
										"memory": api.Percentage(50),
										"pods":   api.Percentage(50),
									},
								},
							},
						},
						Plugins: api.Plugins{
							Balance: api.PluginSet{
								Enabled: []string{nodeutilization.LowNodeUtilizationPluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removeduplicates.PluginName,
								Args: &removeduplicates.RemoveDuplicatesArgs{},
							},
						},
						Plugins: api.Plugins{
							Balance: api.PluginSet{
								Enabled: []string{removeduplicates.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removefailedpods.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removefailedpods.PluginName,
								Args: &removefailedpods.RemoveFailedPodsArgs{},
							},
						},
						Plugins: api.Plugins{
							Deschedule: api.PluginSet{
								Enabled: []string{removefailedpods.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removepodshavingtoomanyrestarts.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removepodshavingtoomanyrestarts.PluginName,
								Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
									PodRestartThreshold: 100,
								},
							},
						},
						Plugins: api.Plugins{
							Deschedule: api.PluginSet{
								Enabled: []string{removepodshavingtoomanyrestarts.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatinginterpodantiaffinity.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removepodsviolatinginterpodantiaffinity.PluginName,
								Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{},
							},
						},
						Plugins: api.Plugins{
							Deschedule: api.PluginSet{
								Enabled: []string{removepodsviolatinginterpodantiaffinity.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodeaffinity.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removepodsviolatingnodeaffinity.PluginName,
								Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
									NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
								},
							},
						},
						Plugins: api.Plugins{
							Deschedule: api.PluginSet{
								Enabled: []string{removepodsviolatingnodeaffinity.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodetaints.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removepodsviolatingnodetaints.PluginName,
								Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{},
							},
						},
						Plugins: api.Plugins{
							Deschedule: api.PluginSet{
								Enabled: []string{removepodsviolatingnodetaints.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingtopologyspreadconstraint.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removepodsviolatingtopologyspreadconstraint.PluginName,
								Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
									Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
									TopologyBalanceNodeFit: utilpointer.Bool(true),
								},
							},
						},
						Plugins: api.Plugins{
							Balance: api.PluginSet{
								Enabled: []string{removepodsviolatingtopologyspreadconstraint.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
				},
			},
		},
		{
			description: "pass in all params to check args",
			policy: &v1alpha1.DeschedulerPolicy{
				Strategies: v1alpha1.StrategyList{
					removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							RemoveDuplicates: &v1alpha1.RemoveDuplicates{
								ExcludeOwnerKinds: []string{"ReplicaSet"},
							},
						},
					},
					nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
								Thresholds: v1alpha1.ResourceThresholds{
									"cpu":    v1alpha1.Percentage(20),
									"memory": v1alpha1.Percentage(20),
									"pods":   v1alpha1.Percentage(20),
								},
								TargetThresholds: v1alpha1.ResourceThresholds{
									"cpu":    v1alpha1.Percentage(50),
									"memory": v1alpha1.Percentage(50),
									"pods":   v1alpha1.Percentage(50),
								},
								UseDeviationThresholds: true,
								NumberOfNodes:          3,
							},
						},
					},
					nodeutilization.HighNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
								Thresholds: v1alpha1.ResourceThresholds{
									"cpu":    v1alpha1.Percentage(20),
									"memory": v1alpha1.Percentage(20),
									"pods":   v1alpha1.Percentage(20),
								},
							},
						},
					},
					removefailedpods.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							FailedPods: &v1alpha1.FailedPods{
								MinPodLifetimeSeconds:   utilpointer.Uint(3600),
								ExcludeOwnerKinds:       []string{"Job"},
								Reasons:                 []string{"NodeAffinity"},
								IncludingInitContainers: true,
							},
						},
					},
					removepodshavingtoomanyrestarts.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							PodsHavingTooManyRestarts: &v1alpha1.PodsHavingTooManyRestarts{
								PodRestartThreshold:     100,
								IncludingInitContainers: true,
							},
						},
					},
					removepodsviolatinginterpodantiaffinity.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params:  &v1alpha1.StrategyParameters{},
					},
					removepodsviolatingnodeaffinity.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
						},
					},
					removepodsviolatingnodetaints.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							ExcludedTaints: []string{"dedicated=special-user", "reserved"},
						},
					},
					removepodsviolatingtopologyspreadconstraint.PluginName: v1alpha1.DeschedulerStrategy{
						Enabled: true,
						Params: &v1alpha1.StrategyParameters{
							IncludeSoftConstraints: true,
						},
					},
				},
			},
			result: &api.DeschedulerPolicy{
				Profiles: []api.DeschedulerProfile{
					{
						Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.HighNodeUtilizationPluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: nodeutilization.HighNodeUtilizationPluginName,
								Args: &nodeutilization.HighNodeUtilizationArgs{
									Thresholds: api.ResourceThresholds{
										"cpu":    api.Percentage(20),
										"memory": api.Percentage(20),
										"pods":   api.Percentage(20),
									},
								},
							},
						},
						Plugins: api.Plugins{
							Balance: api.PluginSet{
								Enabled: []string{nodeutilization.HighNodeUtilizationPluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.LowNodeUtilizationPluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: nodeutilization.LowNodeUtilizationPluginName,
								Args: &nodeutilization.LowNodeUtilizationArgs{
									UseDeviationThresholds: true,
									NumberOfNodes:          3,
									Thresholds: api.ResourceThresholds{
										"cpu":    api.Percentage(20),
										"memory": api.Percentage(20),
										"pods":   api.Percentage(20),
									},
									TargetThresholds: api.ResourceThresholds{
										"cpu":    api.Percentage(50),
										"memory": api.Percentage(50),
										"pods":   api.Percentage(50),
									},
								},
							},
						},
						Plugins: api.Plugins{
							Balance: api.PluginSet{
								Enabled: []string{nodeutilization.LowNodeUtilizationPluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removeduplicates.PluginName,
								Args: &removeduplicates.RemoveDuplicatesArgs{
									ExcludeOwnerKinds: []string{"ReplicaSet"},
								},
							},
						},
						Plugins: api.Plugins{
							Balance: api.PluginSet{
								Enabled: []string{removeduplicates.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removefailedpods.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removefailedpods.PluginName,
								Args: &removefailedpods.RemoveFailedPodsArgs{
									ExcludeOwnerKinds:       []string{"Job"},
									MinPodLifetimeSeconds:   utilpointer.Uint(3600),
									Reasons:                 []string{"NodeAffinity"},
									IncludingInitContainers: true,
								},
							},
						},
						Plugins: api.Plugins{
							Deschedule: api.PluginSet{
								Enabled: []string{removefailedpods.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removepodshavingtoomanyrestarts.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removepodshavingtoomanyrestarts.PluginName,
								Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
									PodRestartThreshold:     100,
									IncludingInitContainers: true,
								},
							},
						},
						Plugins: api.Plugins{
							Deschedule: api.PluginSet{
								Enabled: []string{removepodshavingtoomanyrestarts.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatinginterpodantiaffinity.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removepodsviolatinginterpodantiaffinity.PluginName,
								Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{},
							},
						},
						Plugins: api.Plugins{
							Deschedule: api.PluginSet{
								Enabled: []string{removepodsviolatinginterpodantiaffinity.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodeaffinity.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removepodsviolatingnodeaffinity.PluginName,
								Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
									NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
								},
							},
						},
						Plugins: api.Plugins{
							Deschedule: api.PluginSet{
								Enabled: []string{removepodsviolatingnodeaffinity.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodetaints.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removepodsviolatingnodetaints.PluginName,
								Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
									ExcludedTaints: []string{"dedicated=special-user", "reserved"},
								},
							},
						},
						Plugins: api.Plugins{
							Deschedule: api.PluginSet{
								Enabled: []string{removepodsviolatingnodetaints.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
					{
						Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingtopologyspreadconstraint.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: removepodsviolatingtopologyspreadconstraint.PluginName,
								Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
									Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
									TopologyBalanceNodeFit: utilpointer.Bool(true),
								},
							},
						},
						Plugins: api.Plugins{
							Balance: api.PluginSet{
								Enabled: []string{removepodsviolatingtopologyspreadconstraint.PluginName},
							},
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
						},
					},
				},
			},
		},
		{
			description: "invalid strategy name",
			policy: &v1alpha1.DeschedulerPolicy{Strategies: v1alpha1.StrategyList{
				"InvalidName": v1alpha1.DeschedulerStrategy{
					Enabled: true,
					Params:  &v1alpha1.StrategyParameters{},
				},
			}},
			result: nil,
			err:    fmt.Errorf("unknown strategy name: InvalidName"),
		},
		{
			description: "invalid threshold priority",
			policy: &v1alpha1.DeschedulerPolicy{Strategies: v1alpha1.StrategyList{
				nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
					Enabled: true,
					Params: &v1alpha1.StrategyParameters{
						ThresholdPriority:          utilpointer.Int32(100),
						ThresholdPriorityClassName: "name",
						NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
							Thresholds: v1alpha1.ResourceThresholds{
								"cpu":    v1alpha1.Percentage(20),
								"memory": v1alpha1.Percentage(20),
								"pods":   v1alpha1.Percentage(20),
							},
							TargetThresholds: v1alpha1.ResourceThresholds{
								"cpu":    v1alpha1.Percentage(50),
								"memory": v1alpha1.Percentage(50),
								"pods":   v1alpha1.Percentage(50),
							},
						},
					},
				},
			}},
			result: nil,
			err:    fmt.Errorf("priority threshold misconfigured for plugin LowNodeUtilization"),
		},
	}
// scope contains information about an ongoing conversion.
type scope struct {
	converter *conversion.Converter
	meta      *conversion.Meta
}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			result := &api.DeschedulerPolicy{}
			scope := scope{}
			err := v1alpha1.V1alpha1ToInternal(tc.policy, pluginregistry.PluginRegistry, result, scope)
			if err != nil {
				if err.Error() != tc.err.Error() {
					t.Errorf("unexpected error: %s", err.Error())
				}
			}
			if err == nil {
				// sort to easily compare deepequality
				result.Profiles = api.SortDeschedulerProfileByName(result.Profiles)
				diff := cmp.Diff(tc.result, result)
				if diff != "" {
					t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
				}
			}
		})
	}
// Convert continues a conversion.
func (s scope) Convert(src, dest interface{}) error {
	return s.converter.Convert(src, dest, s.meta)
}

// Meta returns the meta object that was originally passed to Convert.
func (s scope) Meta() *conversion.Meta {
	return s.meta
}
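
The scope value above is a test stub; assuming apimachinery's conversion.Scope asks only for these two methods, a zero value is enough to drive the conversion, as the test loop above does:

	// Illustrative only: scope{} satisfies conversion.Scope via Convert and Meta.
	var s conversion.Scope = scope{}
	result := &api.DeschedulerPolicy{}
	err := v1alpha1.V1alpha1ToInternal(policy, pluginregistry.PluginRegistry, result, s)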

func TestDecodeVersionedPolicy(t *testing.T) {
	client := fakeclientset.NewSimpleClientset()
	SetupPlugins()
	defaultEvictorPluginConfig := api.PluginConfig{
		Name: defaultevictor.PluginName,
		Args: &defaultevictor.DefaultEvictorArgs{
			PriorityThreshold: &api.PriorityThreshold{
				Value: utilpointer.Int32(utils.SystemCriticalPriority),
			},
		},
	}

	type testCase struct {
		description string
		policy      []byte
@@ -817,124 +59,6 @@ func TestDecodeVersionedPolicy(t *testing.T) {
		result *api.DeschedulerPolicy
	}
	testCases := []testCase{
		{
			description: "v1alpha1 to internal",
			policy: []byte(`apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 5
      namespaces:
        include:
          - "testleaderelection-a"
`),
			result: &api.DeschedulerPolicy{
				Profiles: []api.DeschedulerProfile{
					{
						Name: fmt.Sprintf("strategy-%s-profile", podlifetime.PluginName),
						PluginConfigs: []api.PluginConfig{
							defaultEvictorPluginConfig,
							{
								Name: podlifetime.PluginName,
								Args: &podlifetime.PodLifeTimeArgs{
									Namespaces: &api.Namespaces{
										Include: []string{"testleaderelection-a"},
									},
									MaxPodLifeTimeSeconds: utilpointer.Uint(5),
								},
							},
						},
						Plugins: api.Plugins{
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							Deschedule: api.PluginSet{
								Enabled: []string{podlifetime.PluginName},
							},
						},
					},
				},
			},
		},
		{
			description: "v1alpha1 to internal with priorityThreshold",
			policy: []byte(`apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 5
      namespaces:
        include:
          - "testleaderelection-a"
      thresholdPriority: null
      thresholdPriorityClassName: prioritym
`),
			result: &api.DeschedulerPolicy{
				Profiles: []api.DeschedulerProfile{
					{
						Name: fmt.Sprintf("strategy-%s-profile", podlifetime.PluginName),
						PluginConfigs: []api.PluginConfig{
							{
								Name: "DefaultEvictor",
								Args: &defaultevictor.DefaultEvictorArgs{
									PriorityThreshold: &api.PriorityThreshold{
										Value: utilpointer.Int32(0),
									},
								},
							},
							{
								Name: podlifetime.PluginName,
								Args: &podlifetime.PodLifeTimeArgs{
									Namespaces: &api.Namespaces{
										Include: []string{"testleaderelection-a"},
									},
									MaxPodLifeTimeSeconds: utilpointer.Uint(5),
								},
							},
						},
						Plugins: api.Plugins{
							Filter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							PreEvictionFilter: api.PluginSet{
								Enabled: []string{defaultevictor.PluginName},
							},
							Deschedule: api.PluginSet{
								Enabled: []string{podlifetime.PluginName},
							},
						},
					},
				},
			},
		},
		{
			description: "v1alpha1 to internal with priorityThreshold value and name should return error",
			policy: []byte(`apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 5
      namespaces:
        include:
          - "testleaderelection-a"
      thresholdPriority: 222
      thresholdPriorityClassName: prioritym
`),
			result: nil,
			err:    fmt.Errorf("failed decoding descheduler's policy config \"filename\": priority threshold misconfigured for plugin PodLifeTime"),
		},
		{
			description: "v1alpha2 to internal",
			policy: []byte(`apiVersion: "descheduler/v1alpha2"
@@ -947,6 +71,7 @@ profiles:
        evictSystemCriticalPods: true
        evictFailedBarePods: true
        evictLocalStoragePods: true
        evictDaemonSetPods: true
        nodeFit: true
  - name: "RemovePodsHavingTooManyRestarts"
    args:
@@ -968,7 +93,8 @@ profiles:
								EvictSystemCriticalPods: true,
								EvictFailedBarePods:     true,
								EvictLocalStoragePods:   true,
								PriorityThreshold:       &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
								EvictDaemonSetPods:      true,
								PriorityThreshold:       &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
								NodeFit:                 true,
							},
						},
@@ -1099,6 +225,7 @@ profiles:
        evictSystemCriticalPods: true
        evictFailedBarePods: true
        evictLocalStoragePods: true
        evictDaemonSetPods: true
        nodeFit: true
  - name: "RemoveFailedPods"
    plugins:
@@ -1123,14 +250,15 @@ profiles:
								EvictSystemCriticalPods: true,
								EvictFailedBarePods:     true,
								EvictLocalStoragePods:   true,
								PriorityThreshold:       &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
								EvictDaemonSetPods:      true,
								PriorityThreshold:       &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
								NodeFit:                 true,
							},
						},
						{
							Name: removefailedpods.PluginName,
							Args: &removefailedpods.RemoveFailedPodsArgs{
								MinPodLifetimeSeconds: utilpointer.Uint(3600),
								MinPodLifetimeSeconds: utilptr.To[uint](3600),
							},
						},
					},
@@ -1161,6 +289,7 @@ profiles:
        evictSystemCriticalPods: true
        evictFailedBarePods: true
        evictLocalStoragePods: true
        evictDaemonSetPods: true
        nodeFit: true
  - name: "RemoveFailedPods"
    plugins:
@@ -1179,14 +308,15 @@ profiles:
								EvictSystemCriticalPods: true,
								EvictFailedBarePods:     true,
								EvictLocalStoragePods:   true,
								PriorityThreshold:       &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
								EvictDaemonSetPods:      true,
								PriorityThreshold:       &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
								NodeFit:                 true,
							},
						},
						{
							Name: removefailedpods.PluginName,
							Args: &removefailedpods.RemoveFailedPodsArgs{
								MinPodLifetimeSeconds: utilpointer.Uint(3600),
								MinPodLifetimeSeconds: utilptr.To[uint](3600),
							},
						},
					},

@@ -21,7 +21,6 @@ import (
	"k8s.io/apimachinery/pkg/runtime/serializer"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
	"sigs.k8s.io/descheduler/pkg/api/v1alpha2"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
	componentconfigv1alpha1 "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
@@ -57,10 +56,8 @@ func init() {

	utilruntime.Must(componentconfig.AddToScheme(Scheme))
	utilruntime.Must(componentconfigv1alpha1.AddToScheme(Scheme))
	utilruntime.Must(v1alpha1.AddToScheme(Scheme))
	utilruntime.Must(v1alpha2.AddToScheme(Scheme))
	utilruntime.Must(Scheme.SetVersionPriority(
		v1alpha2.SchemeGroupVersion,
		v1alpha1.SchemeGroupVersion,
	))
}

49	pkg/features/features.go	Normal file
@@ -0,0 +1,49 @@
package features

import (
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/component-base/featuregate"
)

const (
	// Every feature gate should add method here following this template:
	//
	// // owner: @username
	// // kep: kep link
	// // alpha: v1.X
	// MyFeature featuregate.Feature = "MyFeature"
	//
	// Feature gates should be listed in alphabetical, case-sensitive
	// (upper before any lower case character) order. This reduces the risk
	// of code conflicts because changes are more likely to be scattered
	// across the file.

	// owner: @ingvagabund
	// kep: https://github.com/kubernetes-sigs/descheduler/issues/1397
	// alpha: v1.31
	//
	// Enable evictions in background so users can create their own eviction policies
	// as an alternative to immediate evictions.
	EvictionsInBackground featuregate.Feature = "EvictionsInBackground"
)

func init() {
	runtime.Must(DefaultMutableFeatureGate.Add(defaultDeschedulerFeatureGates))
}

// defaultDeschedulerFeatureGates consists of all known descheduler-specific feature keys.
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout descheduler binary.
//
// Entries are separated from each other with blank lines to avoid sweeping gofmt changes
// when adding or removing one entry.
var defaultDeschedulerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
	EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
}

// DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate.
// Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this.
// Tests that need to modify feature gates for the duration of their test should use:
//
//	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)()
var DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()
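
A brief usage sketch, not taken from this diff: callers are expected to consult the gate through this package, and the call site below is illustrative only.

	// Enabled reports whether the alpha EvictionsInBackground gate is switched on.
	if features.DefaultMutableFeatureGate.Enabled(features.EvictionsInBackground) {
		// take the background-eviction path instead of evicting immediately
	}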

@@ -8,6 +8,7 @@ import (
	clientset "k8s.io/client-go/kubernetes"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)
@@ -18,6 +19,7 @@ type HandleImpl struct {
	SharedInformerFactoryImpl informers.SharedInformerFactory
	EvictorFilterImpl         frameworktypes.EvictorPlugin
	PodEvictorImpl            *evictions.PodEvictor
	MetricsCollectorImpl      *metricscollector.MetricsCollector
}

var _ frameworktypes.Handle = &HandleImpl{}
@@ -26,6 +28,10 @@ func (hi *HandleImpl) ClientSet() clientset.Interface {
	return hi.ClientsetImpl
}

func (hi *HandleImpl) MetricsCollector() *metricscollector.MetricsCollector {
	return hi.MetricsCollectorImpl
}

func (hi *HandleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
	return hi.GetPodsAssignedToNodeFuncImpl
}
@@ -46,10 +52,6 @@ func (hi *HandleImpl) PreEvictionFilter(pod *v1.Pod) bool {
	return hi.EvictorFilterImpl.PreEvictionFilter(pod)
}

func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) error {
	return hi.PodEvictorImpl.EvictPod(ctx, pod, opts)
}
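
With Evict now returning an error rather than a bool, callers can surface the reason an eviction was rejected; a hypothetical call site (names assumed, not from this diff) changes along these lines:

	if err := handle.Evict(ctx, pod, evictions.EvictOptions{}); err != nil {
		// the returned error now carries the rejection reason
		klog.ErrorS(err, "eviction failed", "pod", klog.KObj(pod))
	}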

func (hi *HandleImpl) NodeLimitExceeded(node *v1.Node) bool {
	return hi.PodEvictorImpl.NodeLimitExceeded(node)
}

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -16,13 +16,16 @@ package defaultevictor
import (
	// "context"
	"context"
	"errors"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/errors"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -45,7 +48,7 @@ type constraint func(pod *v1.Pod) error
// This plugin is only meant to customize other actions (extension points) of the evictor,
// like filtering, sorting, and other ones that might be relevant in the future
type DefaultEvictor struct {
	args runtime.Object
	args *DefaultEvictorArgs
	constraints []constraint
	handle      frameworktypes.Handle
}
@@ -62,15 +65,17 @@ func HaveEvictAnnotation(pod *v1.Pod) bool {
}

// New builds plugin from its arguments while passing a handle
// nolint: gocyclo
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
	defaultEvictorArgs, ok := args.(*DefaultEvictorArgs)
	if !ok {
		return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
	}

	ev := &DefaultEvictor{}
	ev.handle = handle
	ev.args = defaultEvictorArgs
	ev := &DefaultEvictor{
		handle: handle,
		args:   defaultEvictorArgs,
	}

	if defaultEvictorArgs.EvictFailedBarePods {
		klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
@@ -122,6 +127,15 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
			return nil
		})
	}
	if !defaultEvictorArgs.EvictDaemonSetPods {
		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
			ownerRefList := podutil.OwnerRef(pod)
			if utils.IsDaemonsetPod(ownerRefList) {
				return fmt.Errorf("pod is related to daemonset and descheduler is not configured with evictDaemonSetPods")
			}
			return nil
		})
	}
	if defaultEvictorArgs.IgnorePvcPods {
		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
			if utils.IsPodWithPVC(pod) {
@@ -143,6 +157,58 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
		})
	}

	if defaultEvictorArgs.MinReplicas > 1 {
		indexName := "metadata.ownerReferences"
		indexer, err := getPodIndexerByOwnerRefs(indexName, handle)
		if err != nil {
			return nil, err
		}
		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
			if len(pod.OwnerReferences) == 0 {
				return nil
			}

			if len(pod.OwnerReferences) > 1 {
				klog.V(5).InfoS("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
				return nil
			}

			ownerRef := pod.OwnerReferences[0]
			objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
			if err != nil {
				return fmt.Errorf("unable to list pods for minReplicas filter in the policy parameter")
			}

			if uint(len(objs)) < defaultEvictorArgs.MinReplicas {
				return fmt.Errorf("owner has %d replicas which is less than minReplicas of %d", len(objs), defaultEvictorArgs.MinReplicas)
			}

			return nil
		})
	}

	if defaultEvictorArgs.MinPodAge != nil {
		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
			if pod.Status.StartTime == nil || time.Since(pod.Status.StartTime.Time) < defaultEvictorArgs.MinPodAge.Duration {
				return fmt.Errorf("pod age is not older than MinPodAge: %s seconds", defaultEvictorArgs.MinPodAge.String())
			}
			return nil
		})
	}

	if defaultEvictorArgs.IgnorePodsWithoutPDB {
		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
			hasPdb, err := utils.IsPodCoveredByPDB(pod, handle.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister())
			if err != nil {
				return fmt.Errorf("unable to check if pod is covered by PodDisruptionBudget: %w", err)
			}
			if !hasPdb {
				return fmt.Errorf("no PodDisruptionBudget found for pod")
			}
			return nil
		})
	}

	return ev, nil
}

@@ -152,9 +218,8 @@ func (d *DefaultEvictor) Name() string {
}

func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
	defaultEvictorArgs := d.args.(*DefaultEvictorArgs)
	if defaultEvictorArgs.NodeFit {
		nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), defaultEvictorArgs.NodeSelector)
	if d.args.NodeFit {
		nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), d.args.NodeSelector)
		if err != nil {
			klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
			return false
@@ -175,11 +240,6 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
		return true
	}

	ownerRefList := podutil.OwnerRef(pod)
	if utils.IsDaemonsetPod(ownerRefList) {
		checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
	}

	if utils.IsMirrorPod(pod) {
		checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
	}
@@ -199,9 +259,36 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
	}

	if len(checkErrs) > 0 {
		klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", errors.NewAggregate(checkErrs).Error())
		klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
		return false
	}

	return true
}

func getPodIndexerByOwnerRefs(indexName string, handle frameworktypes.Handle) (cache.Indexer, error) {
	podInformer := handle.SharedInformerFactory().Core().V1().Pods().Informer()
	indexer := podInformer.GetIndexer()

	// do not reinitialize the indexer, if it's been defined already
	for name := range indexer.GetIndexers() {
		if name == indexName {
			return indexer, nil
		}
	}

	if err := podInformer.AddIndexers(cache.Indexers{
		indexName: func(obj interface{}) ([]string, error) {
			pod, ok := obj.(*v1.Pod)
			if !ok {
				return []string{}, errors.New("unexpected object")
			}

			return podutil.OwnerRefUIDs(pod), nil
		},
	}); err != nil {
		return nil, err
	}

	return indexer, nil
}
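
For reference, a condensed sketch of how the MinReplicas constraint in New above consumes this indexer (indexName and ownerRef as defined there):

	// ByIndex lists the sibling pods sharing this pod's owner-reference UID.
	objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
	if err == nil && uint(len(objs)) < defaultEvictorArgs.MinReplicas {
		// fewer replicas than MinReplicas: the pod is filtered out, not evicted
	}
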
@@ -15,11 +15,17 @@ package defaultevictor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -30,6 +36,22 @@ import (
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
type testCase struct {
|
||||
description string
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
pdbs []*policyv1.PodDisruptionBudget
|
||||
evictFailedBarePods bool
|
||||
evictLocalStoragePods bool
|
||||
evictSystemCriticalPods bool
|
||||
priorityThreshold *int32
|
||||
nodeFit bool
|
||||
minReplicas uint
|
||||
minPodAge *metav1.Duration
|
||||
result bool
|
||||
ignorePodsWithoutPDB bool
|
||||
}
|
||||
|
||||
func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
||||
|
||||
@@ -38,17 +60,6 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
|
||||
nodeLabelKey := "datacenter"
|
||||
nodeLabelValue := "east"
|
||||
type testCase struct {
|
||||
description string
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
evictFailedBarePods bool
|
||||
evictLocalStoragePods bool
|
||||
evictSystemCriticalPods bool
|
||||
priorityThreshold *int32
|
||||
nodeFit bool
|
||||
result bool
|
||||
}
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
@@ -304,45 +315,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var objs []runtime.Object
|
||||
for _, node := range test.nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
for _, pod := range test.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
defaultEvictorArgs := &DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: test.evictLocalStoragePods,
|
||||
EvictSystemCriticalPods: test.evictSystemCriticalPods,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: test.evictFailedBarePods,
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: test.priorityThreshold,
|
||||
},
|
||||
NodeFit: test.nodeFit,
|
||||
}
|
||||
|
||||
evictorPlugin, err := New(
|
||||
defaultEvictorArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
})
|
||||
evictorPlugin, err := initializePlugin(ctx, test)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
@@ -360,20 +333,12 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
lowPriority := int32(800)
|
||||
highPriority := int32(900)
|
||||
|
||||
minPodAge := metav1.Duration{Duration: 50 * time.Minute}
|
||||
|
||||
nodeTaintKey := "hardware"
|
||||
nodeTaintValue := "gpu"
|
||||
|
||||
type testCase struct {
|
||||
description string
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
evictFailedBarePods bool
|
||||
evictLocalStoragePods bool
|
||||
evictSystemCriticalPods bool
|
||||
priorityThreshold *int32
|
||||
nodeFit bool
|
||||
result bool
|
||||
}
|
||||
ownerRefUUID := uuid.NewUUID()
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
@@ -527,7 +492,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it is part of a daemonSet",
|
||||
description: "Pod not evicted because it is part of a daemonSet",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -538,7 +503,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
@@ -549,7 +514,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it is a mirror poddsa",
|
||||
description: "Pod not evicted because it is a mirror poddsa",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p10", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -560,7 +525,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -572,7 +537,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it has system critical priority",
|
||||
description: "Pod not evicted because it has system critical priority",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -584,7 +549,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p13", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -599,7 +564,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it has a priority higher than the configured priority threshold",
|
||||
description: "Pod not evicted because it has a priority higher than the configured priority threshold",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p14", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -611,7 +576,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
priorityThreshold: &lowPriority,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -624,7 +589,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true",
|
||||
description: "Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -636,7 +601,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -649,7 +614,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -661,7 +626,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -704,6 +669,106 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "minReplicas of 2, owner with 2 replicas, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
},
|
||||
minReplicas: 2,
|
||||
result: true,
|
||||
}, {
|
||||
description: "minReplicas of 3, owner with 2 replicas, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
},
|
||||
minReplicas: 3,
|
||||
result: false,
|
||||
}, {
|
||||
description: "minReplicas of 2, multiple owners, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = append(test.GetNormalPodOwnerRefList(), test.GetNormalPodOwnerRefList()...)
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
minReplicas: 2,
|
||||
result: true,
|
||||
}, {
|
||||
description: "minPodAge of 50, pod created 10 minutes ago, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
podStartTime := metav1.Now().Add(time.Minute * time.Duration(-10))
|
||||
pod.Status.StartTime = &metav1.Time{Time: podStartTime}
|
||||
}),
|
||||
},
|
||||
minPodAge: &minPodAge,
|
||||
result: false,
|
||||
}, {
|
||||
description: "minPodAge of 50, pod created 60 minutes ago, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
podStartTime := metav1.Now().Add(time.Minute * time.Duration(-60))
|
||||
pod.Status.StartTime = &metav1.Time{Time: podStartTime}
|
||||
}),
|
||||
},
|
||||
minPodAge: &minPodAge,
|
||||
result: true,
|
||||
}, {
|
||||
description: "nil minPodAge, pod created 60 minutes ago, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
podStartTime := metav1.Now().Add(time.Minute * time.Duration(-60))
|
||||
pod.Status.StartTime = &metav1.Time{Time: podStartTime}
|
||||
}),
|
||||
},
|
||||
result: true,
|
||||
}, {
|
||||
description: "ignorePodsWithoutPDB, pod with no PDBs, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Labels = map[string]string{
|
||||
"app": "foo",
|
||||
}
|
||||
}),
|
||||
},
|
||||
ignorePodsWithoutPDB: true,
|
||||
result: false,
|
||||
}, {
|
||||
description: "ignorePodsWithoutPDB, pod with PDBs, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Labels = map[string]string{
|
||||
"app": "foo",
|
||||
}
|
||||
}),
|
||||
},
|
||||
pdbs: []*policyv1.PodDisruptionBudget{
|
||||
test.BuildTestPDB("pdb1", "foo"),
|
||||
},
|
||||
ignorePodsWithoutPDB: true,
|
||||
result: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -712,45 +777,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

var objs []runtime.Object
for _, node := range test.nodes {
objs = append(objs, node)
}
for _, pod := range test.pods {
objs = append(objs, pod)
}

fakeClient := fake.NewSimpleClientset(objs...)

sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

defaultEvictorArgs := &DefaultEvictorArgs{
EvictLocalStoragePods: test.evictLocalStoragePods,
EvictSystemCriticalPods: test.evictSystemCriticalPods,
IgnorePvcPods: false,
EvictFailedBarePods: test.evictFailedBarePods,
PriorityThreshold: &api.PriorityThreshold{
Value: test.priorityThreshold,
},
NodeFit: test.nodeFit,
}

evictorPlugin, err := New(
defaultEvictorArgs,
&frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
SharedInformerFactoryImpl: sharedInformerFactory,
})
evictorPlugin, err := initializePlugin(ctx, test)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
@@ -762,3 +789,100 @@ func TestDefaultEvictorFilter(t *testing.T) {
})
}
}

func TestReinitialization(t *testing.T) {
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
ownerRefUUID := uuid.NewUUID()

testCases := []testCase{
{
description: "minReplicas of 2, multiple owners, eviction",
pods: []*v1.Pod{
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = append(test.GetNormalPodOwnerRefList(), test.GetNormalPodOwnerRefList()...)
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
}),
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
}),
},
minReplicas: 2,
result: true,
},
}

for _, test := range testCases {
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

evictorPlugin, err := initializePlugin(ctx, test)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}

defaultEvictor, ok := evictorPlugin.(*DefaultEvictor)
if !ok {
t.Fatalf("Unable to initialize as a DefaultEvictor plugin")
}
_, err = New(defaultEvictor.args, defaultEvictor.handle)
if err != nil {
t.Fatalf("Unable to reinitialize the plugin: %v", err)
}
})
}
}

func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin, error) {
var objs []runtime.Object
for _, node := range test.nodes {
objs = append(objs, node)
}
for _, pod := range test.pods {
objs = append(objs, pod)
}
for _, pdb := range test.pdbs {
objs = append(objs, pdb)
}

fakeClient := fake.NewSimpleClientset(objs...)

sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
_ = sharedInformerFactory.Policy().V1().PodDisruptionBudgets().Lister()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

defaultEvictorArgs := &DefaultEvictorArgs{
EvictLocalStoragePods: test.evictLocalStoragePods,
EvictSystemCriticalPods: test.evictSystemCriticalPods,
IgnorePvcPods: false,
EvictFailedBarePods: test.evictFailedBarePods,
PriorityThreshold: &api.PriorityThreshold{
Value: test.priorityThreshold,
},
NodeFit: test.nodeFit,
MinReplicas: test.minReplicas,
MinPodAge: test.minPodAge,
IgnorePodsWithoutPDB: test.ignorePodsWithoutPDB,
}

evictorPlugin, err := New(
defaultEvictorArgs,
&frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
SharedInformerFactoryImpl: sharedInformerFactory,
})
if err != nil {
return nil, fmt.Errorf("unable to initialize the plugin: %v", err)
}

return evictorPlugin, nil
}

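The extracted initializePlugin helper above is what both TestDefaultEvictorFilter and TestReinitialization now call. A minimal sketch of how a further test could reuse it, assuming the imports already present in the test file; the test name and field values here are hypothetical, only the testCase fields exercised above are used:

func TestMinPodAgeSketch(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	minPodAge := metav1.Duration{Duration: 50 * time.Minute}
	tc := testCase{
		description: "sketch: pod younger than minPodAge is kept",
		nodes:       []*v1.Node{test.BuildTestNode("node1", 1000, 2000, 13, nil)},
		minPodAge:   &minPodAge,
		result:      false,
	}
	evictorPlugin, err := initializePlugin(ctx, tc)
	if err != nil {
		t.Fatalf("Unable to initialize the plugin: %v", err)
	}
	_ = evictorPlugin // the Filter assertion would go here, as in TestDefaultEvictorFilter
}
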
@@ -31,6 +31,9 @@ func SetDefaults_DefaultEvictorArgs(obj runtime.Object) {
if !args.EvictLocalStoragePods {
args.EvictLocalStoragePods = false
}
if !args.EvictDaemonSetPods {
args.EvictDaemonSetPods = false
}
if !args.EvictSystemCriticalPods {
args.EvictSystemCriticalPods = false
}

@@ -19,7 +19,7 @@ import (
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/utils/pointer"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api"
)

@@ -35,12 +35,14 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
want: &DefaultEvictorArgs{
NodeSelector: "",
EvictLocalStoragePods: false,
EvictDaemonSetPods: false,
EvictSystemCriticalPods: false,
IgnorePvcPods: false,
EvictFailedBarePods: false,
LabelSelector: nil,
PriorityThreshold: nil,
NodeFit: false,
IgnorePodsWithoutPDB: false,
},
},
{
@@ -48,26 +50,30 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
in: &DefaultEvictorArgs{
NodeSelector: "NodeSelector",
EvictLocalStoragePods: true,
EvictDaemonSetPods: true,
EvictSystemCriticalPods: true,
IgnorePvcPods: true,
EvictFailedBarePods: true,
LabelSelector: nil,
PriorityThreshold: &api.PriorityThreshold{
Value: pointer.Int32(800),
Value: utilptr.To[int32](800),
},
NodeFit: true,
NodeFit: true,
IgnorePodsWithoutPDB: true,
},
want: &DefaultEvictorArgs{
NodeSelector: "NodeSelector",
EvictLocalStoragePods: true,
EvictDaemonSetPods: true,
EvictSystemCriticalPods: true,
IgnorePvcPods: true,
EvictFailedBarePods: true,
LabelSelector: nil,
PriorityThreshold: &api.PriorityThreshold{
Value: pointer.Int32(800),
Value: utilptr.To[int32](800),
},
NodeFit: true,
NodeFit: true,
IgnorePodsWithoutPDB: true,
},
},
}

@@ -25,12 +25,16 @@ import (
type DefaultEvictorArgs struct {
metav1.TypeMeta `json:",inline"`

NodeSelector string `json:"nodeSelector"`
EvictLocalStoragePods bool `json:"evictLocalStoragePods"`
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods"`
IgnorePvcPods bool `json:"ignorePvcPods"`
EvictFailedBarePods bool `json:"evictFailedBarePods"`
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold"`
NodeFit bool `json:"nodeFit"`
NodeSelector string `json:"nodeSelector,omitempty"`
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
NodeFit bool `json:"nodeFit,omitempty"`
MinReplicas uint `json:"minReplicas,omitempty"`
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
}

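For context, a minimal sketch of the three new knobs set on the args struct in Go; the surrounding policy wiring is omitted and the field values are illustrative only:

args := &DefaultEvictorArgs{
	NodeFit:              true,
	MinReplicas:          2,                                            // skip pods whose owner has fewer than 2 replicas
	MinPodAge:            &metav1.Duration{Duration: 50 * time.Minute}, // skip pods younger than 50 minutes
	IgnorePodsWithoutPDB: true,                                         // only consider pods covered by a PodDisruptionBudget
}
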
@@ -16,6 +16,8 @@ package defaultevictor
import (
"fmt"

"k8s.io/klog/v2"

"k8s.io/apimachinery/pkg/runtime"
)

@@ -26,5 +28,9 @@ func ValidateDefaultEvictorArgs(obj runtime.Object) error {
return fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set, got %v", args)
}

if args.MinReplicas == 1 {
klog.V(4).Info("DefaultEvictor minReplicas must be greater than 1 to check for min pods during eviction. This check will be ignored during eviction.")
}

return nil
}

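Note that a minReplicas of 1 is accepted rather than rejected: the hunk above only logs at verbosity 4 and still returns nil. A hedged sketch of the observable behavior (assuming the args type satisfies runtime.Object via its generated deepcopy methods):

if err := ValidateDefaultEvictorArgs(&DefaultEvictorArgs{MinReplicas: 1}); err != nil {
	// not reached: MinReplicas == 1 is merely logged and ignored, not an error
}
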
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -41,6 +41,11 @@ func (in *DefaultEvictorArgs) DeepCopyInto(out *DefaultEvictorArgs) {
*out = new(api.PriorityThreshold)
(*in).DeepCopyInto(*out)
}
if in.MinPodAge != nil {
in, out := &in.MinPodAge, &out.MinPodAge
*out = new(v1.Duration)
**out = **in
}
return
}


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"

podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -37,9 +38,13 @@ const HighNodeUtilizationPluginName = "HighNodeUtilization"
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.

type HighNodeUtilization struct {
handle frameworktypes.Handle
args *HighNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool
handle frameworktypes.Handle
args *HighNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool
underutilizationCriteria []interface{}
resourceNames []v1.ResourceName
targetThresholds api.ResourceThresholds
usageClient usageClient
}

var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
@@ -51,6 +56,21 @@ func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (
return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
}

targetThresholds := make(api.ResourceThresholds)
setDefaultForThresholds(highNodeUtilizatioArgs.Thresholds, targetThresholds)
resourceNames := getResourceNames(targetThresholds)

underutilizationCriteria := []interface{}{
"CPU", highNodeUtilizatioArgs.Thresholds[v1.ResourceCPU],
"Mem", highNodeUtilizatioArgs.Thresholds[v1.ResourceMemory],
"Pods", highNodeUtilizatioArgs.Thresholds[v1.ResourcePods],
}
for name := range highNodeUtilizatioArgs.Thresholds {
if !nodeutil.IsBasicResource(name) {
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(highNodeUtilizatioArgs.Thresholds[name]))
}
}

podFilter, err := podutil.NewOptions().
WithFilter(handle.Evictor().Filter).
BuildFilterFunc()
@@ -59,9 +79,13 @@ func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (
}

return &HighNodeUtilization{
handle: handle,
args: highNodeUtilizatioArgs,
podFilter: podFilter,
handle: handle,
args: highNodeUtilizatioArgs,
resourceNames: resourceNames,
targetThresholds: targetThresholds,
underutilizationCriteria: underutilizationCriteria,
podFilter: podFilter,
usageClient: newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc()),
}, nil
}

@@ -72,15 +96,15 @@ func (h *HighNodeUtilization) Name() string {

// Balance extension point implementation for the plugin
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
thresholds := h.args.Thresholds
targetThresholds := make(api.ResourceThresholds)

setDefaultForThresholds(thresholds, targetThresholds)
resourceNames := getResourceNames(targetThresholds)
if err := h.usageClient.sync(nodes); err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error getting node usage: %v", err),
}
}

sourceNodes, highNodes := classifyNodes(
getNodeUsage(nodes, resourceNames, h.handle.GetPodsAssignedToNodeFunc()),
getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, h.handle.GetPodsAssignedToNodeFunc(), false),
getNodeUsage(nodes, h.usageClient),
getNodeThresholds(nodes, h.args.Thresholds, h.targetThresholds, h.resourceNames, false, h.usageClient),
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
},
@@ -93,18 +117,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
})

// log message in one line
keysAndValues := []interface{}{
"CPU", thresholds[v1.ResourceCPU],
"Mem", thresholds[v1.ResourceMemory],
"Pods", thresholds[v1.ResourcePods],
}
for name := range thresholds {
if !nodeutil.IsBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
}
}

klog.V(1).InfoS("Criteria for a node below target utilization", keysAndValues...)
klog.V(1).InfoS("Criteria for a node below target utilization", h.underutilizationCriteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(sourceNodes))

if len(sourceNodes) == 0 {
@@ -144,9 +157,12 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
sourceNodes,
highNodes,
h.handle.Evictor(),
evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
h.podFilter,
resourceNames,
continueEvictionCond)
h.resourceNames,
continueEvictionCond,
h.usageClient,
)

return nil
}

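The hunks above follow a compute-once pattern: thresholds, resource names, and the log criteria move out of Balance into NewHighNodeUtilization and are cached on the struct, while per-run state is refreshed through usageClient.sync. A minimal, self-contained sketch of the same pattern, independent of the descheduler types:

package main

import "fmt"

type balancer struct {
	criteria []interface{} // computed once at construction, reused every run
}

func newBalancer(thresholds map[string]int64) *balancer {
	c := make([]interface{}, 0, len(thresholds)*2)
	for name, value := range thresholds {
		c = append(c, name, value)
	}
	return &balancer{criteria: c}
}

func main() {
	b := newBalancer(map[string]int64{"CPU": 20, "Pods": 20})
	// each run reuses the cached criteria instead of rebuilding them
	fmt.Println(b.criteria...)
}
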
@@ -25,16 +25,13 @@ import (
policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/events"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
@@ -115,6 +112,7 @@ func TestHighNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
@@ -167,6 +165,7 @@ func TestHighNodeUtilization(t *testing.T) {
// These won't be evicted.
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
@@ -448,20 +447,16 @@ func TestHighNodeUtilization(t *testing.T) {
for _, pod := range testCase.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)

sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}

podsForEviction := make(map[string]struct{})
for _, pod := range testCase.evictedPods {
podsForEviction[pod] = struct{}{}
}
fakeClient := fake.NewSimpleClientset(objs...)

handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
if err != nil {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}

evictionFailed := false
if len(testCase.evictedPods) > 0 {
@@ -479,50 +474,6 @@ func TestHighNodeUtilization(t *testing.T) {
})
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

eventRecorder := &events.FakeRecorder{}

podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
nil,
nil,
testCase.nodes,
false,
eventRecorder,
)

defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
EvictLocalStoragePods: false,
EvictSystemCriticalPods: false,
IgnorePvcPods: false,
EvictFailedBarePods: false,
NodeFit: true,
}

evictorFilter, err := defaultevictor.New(
defaultevictorArgs,
&frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
SharedInformerFactoryImpl: sharedInformerFactory,
},
)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}

handle := &frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
SharedInformerFactoryImpl: sharedInformerFactory,
}

plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
Thresholds: testCase.thresholds,
},
@@ -623,55 +574,16 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
}

fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

eventRecorder := &events.FakeRecorder{}

podEvictor := evictions.NewPodEvictor(
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
ctx,
fakeClient,
"policy/v1",
false,
&item.evictionsExpected,
evictions.NewOptions().WithMaxPodsToEvictPerNode(&item.evictionsExpected),
defaultevictor.DefaultEvictorArgs{},
nil,
item.nodes,
false,
eventRecorder,
)

defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
EvictLocalStoragePods: false,
EvictSystemCriticalPods: false,
IgnorePvcPods: false,
EvictFailedBarePods: false,
}

evictorFilter, err := defaultevictor.New(
defaultevictorArgs,
&frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
SharedInformerFactoryImpl: sharedInformerFactory,
},
)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}

handle := &frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
SharedInformerFactoryImpl: sharedInformerFactory,
t.Fatalf("Unable to initialize a framework handle: %v", err)
}

plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{

@@ -24,6 +24,9 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
@@ -35,9 +38,13 @@ const LowNodeUtilizationPluginName = "LowNodeUtilization"
// to calculate nodes' utilization and not the actual resource usage.

type LowNodeUtilization struct {
handle frameworktypes.Handle
args *LowNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool
handle frameworktypes.Handle
args *LowNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool
underutilizationCriteria []interface{}
overutilizationCriteria []interface{}
resourceNames []v1.ResourceName
usageClient usageClient
}

var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
@@ -49,6 +56,30 @@ func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (f
return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
}

setDefaultForLNUThresholds(lowNodeUtilizationArgsArgs.Thresholds, lowNodeUtilizationArgsArgs.TargetThresholds, lowNodeUtilizationArgsArgs.UseDeviationThresholds)

underutilizationCriteria := []interface{}{
"CPU", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceCPU],
"Mem", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceMemory],
"Pods", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourcePods],
}
for name := range lowNodeUtilizationArgsArgs.Thresholds {
if !nodeutil.IsBasicResource(name) {
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.Thresholds[name]))
}
}

overutilizationCriteria := []interface{}{
"CPU", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceCPU],
"Mem", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceMemory],
"Pods", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourcePods],
}
for name := range lowNodeUtilizationArgsArgs.TargetThresholds {
if !nodeutil.IsBasicResource(name) {
overutilizationCriteria = append(overutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.TargetThresholds[name]))
}
}

podFilter, err := podutil.NewOptions().
WithFilter(handle.Evictor().Filter).
BuildFilterFunc()
@@ -56,10 +87,26 @@ func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (f
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
}

resourceNames := getResourceNames(lowNodeUtilizationArgsArgs.Thresholds)

var usageClient usageClient
if lowNodeUtilizationArgsArgs.MetricsUtilization.MetricsServer {
if handle.MetricsCollector() == nil {
return nil, fmt.Errorf("metrics client not initialized")
}
usageClient = newActualUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc(), handle.MetricsCollector())
} else {
usageClient = newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc())
}

return &LowNodeUtilization{
handle: handle,
args: lowNodeUtilizationArgsArgs,
podFilter: podFilter,
handle: handle,
args: lowNodeUtilizationArgsArgs,
underutilizationCriteria: underutilizationCriteria,
overutilizationCriteria: overutilizationCriteria,
resourceNames: resourceNames,
podFilter: podFilter,
usageClient: usageClient,
}, nil
}

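The constructor now picks one of two usage clients: request-based accounting by default, or actual usage from the metrics collector when metricsUtilization.metricsServer is enabled. A minimal generic sketch of the same selection pattern; the names here are illustrative, not the descheduler's API:

package main

import (
	"errors"
	"fmt"
)

type usageSource interface {
	usage(node string) (int64, error)
}

type requested struct{}

// sums pod resource requests (the default accounting)
func (requested) usage(string) (int64, error) { return 400, nil }

type actual struct{}

// reads live metrics from a collector
func (actual) usage(string) (int64, error) { return 320, nil }

func pickSource(useMetrics, collectorReady bool) (usageSource, error) {
	if useMetrics {
		if !collectorReady {
			return nil, errors.New("metrics client not initialized")
		}
		return actual{}, nil
	}
	return requested{}, nil
}

func main() {
	src, err := pickSource(false, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(src.usage("node1"))
}
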
@@ -70,43 +117,15 @@ func (l *LowNodeUtilization) Name() string {

// Balance extension point implementation for the plugin
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
useDeviationThresholds := l.args.UseDeviationThresholds
thresholds := l.args.Thresholds
targetThresholds := l.args.TargetThresholds

// check if Pods/CPU/Mem are set, if not, set them to 100
if _, ok := thresholds[v1.ResourcePods]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourcePods] = MinResourcePercentage
targetThresholds[v1.ResourcePods] = MinResourcePercentage
} else {
thresholds[v1.ResourcePods] = MaxResourcePercentage
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
if err := l.usageClient.sync(nodes); err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error getting node usage: %v", err),
}
}
if _, ok := thresholds[v1.ResourceCPU]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceCPU] = MinResourcePercentage
targetThresholds[v1.ResourceCPU] = MinResourcePercentage
} else {
thresholds[v1.ResourceCPU] = MaxResourcePercentage
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
}
}
if _, ok := thresholds[v1.ResourceMemory]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceMemory] = MinResourcePercentage
targetThresholds[v1.ResourceMemory] = MinResourcePercentage
} else {
thresholds[v1.ResourceMemory] = MaxResourcePercentage
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
}
}
resourceNames := getResourceNames(thresholds)

lowNodes, sourceNodes := classifyNodes(
getNodeUsage(nodes, resourceNames, l.handle.GetPodsAssignedToNodeFunc()),
getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, l.handle.GetPodsAssignedToNodeFunc(), useDeviationThresholds),
getNodeUsage(nodes, l.usageClient),
getNodeThresholds(nodes, l.args.Thresholds, l.args.TargetThresholds, l.resourceNames, l.args.UseDeviationThresholds, l.usageClient),
// The node has to be schedulable (to be able to move workload there)
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
if nodeutil.IsNodeUnschedulable(node) {
@@ -121,31 +140,11 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
)

// log message for nodes with low utilization
underutilizationCriteria := []interface{}{
"CPU", thresholds[v1.ResourceCPU],
"Mem", thresholds[v1.ResourceMemory],
"Pods", thresholds[v1.ResourcePods],
}
for name := range thresholds {
if !nodeutil.IsBasicResource(name) {
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(thresholds[name]))
}
}
klog.V(1).InfoS("Criteria for a node under utilization", underutilizationCriteria...)
klog.V(1).InfoS("Criteria for a node under utilization", l.underutilizationCriteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))

// log message for over utilized nodes
overutilizationCriteria := []interface{}{
"CPU", targetThresholds[v1.ResourceCPU],
"Mem", targetThresholds[v1.ResourceMemory],
"Pods", targetThresholds[v1.ResourcePods],
}
for name := range targetThresholds {
if !nodeutil.IsBasicResource(name) {
overutilizationCriteria = append(overutilizationCriteria, string(name), int64(targetThresholds[name]))
}
}
klog.V(1).InfoS("Criteria for a node above target utilization", overutilizationCriteria...)
klog.V(1).InfoS("Criteria for a node above target utilization", l.overutilizationCriteria...)
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(sourceNodes))

if len(lowNodes) == 0 {
@@ -191,9 +190,43 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
sourceNodes,
lowNodes,
l.handle.Evictor(),
evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
l.podFilter,
resourceNames,
continueEvictionCond)
l.resourceNames,
continueEvictionCond,
l.usageClient,
)

return nil
}

func setDefaultForLNUThresholds(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) {
// check if Pods/CPU/Mem are set, if not, set them to 100
if _, ok := thresholds[v1.ResourcePods]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourcePods] = MinResourcePercentage
targetThresholds[v1.ResourcePods] = MinResourcePercentage
} else {
thresholds[v1.ResourcePods] = MaxResourcePercentage
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
}
}
if _, ok := thresholds[v1.ResourceCPU]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceCPU] = MinResourcePercentage
targetThresholds[v1.ResourceCPU] = MinResourcePercentage
} else {
thresholds[v1.ResourceCPU] = MaxResourcePercentage
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
}
}
if _, ok := thresholds[v1.ResourceMemory]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceMemory] = MinResourcePercentage
targetThresholds[v1.ResourceMemory] = MinResourcePercentage
} else {
thresholds[v1.ResourceMemory] = MaxResourcePercentage
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
}
}
}

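setDefaultForLNUThresholds simply fills in any of the three basic resources the policy left unset: MaxResourcePercentage in the usual mode, or MinResourcePercentage when deviation thresholds are in use. A hedged usage sketch, assuming MaxResourcePercentage is 100 as the comment implies:

thresholds := api.ResourceThresholds{v1.ResourceCPU: 20}
targetThresholds := api.ResourceThresholds{v1.ResourceCPU: 70}
setDefaultForLNUThresholds(thresholds, targetThresholds, false)
// thresholds is now {cpu: 20, memory: 100, pods: 100} and
// targetThresholds is now {cpu: 70, memory: 100, pods: 100}
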
@@ -21,22 +21,23 @@ import (
"fmt"
"testing"

"sigs.k8s.io/descheduler/pkg/api"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"

v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/events"
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
@@ -51,14 +52,17 @@ func TestLowNodeUtilization(t *testing.T) {
notMatchingNodeSelectorValue := "east"

testCases := []struct {
name string
useDeviationThresholds bool
thresholds, targetThresholds api.ResourceThresholds
nodes []*v1.Node
pods []*v1.Pod
expectedPodsEvicted uint
evictedPods []string
evictableNamespaces *api.Namespaces
name string
useDeviationThresholds bool
thresholds, targetThresholds api.ResourceThresholds
nodes []*v1.Node
pods []*v1.Pod
nodemetricses []*v1beta1.NodeMetrics
podmetricses []*v1beta1.PodMetrics
expectedPodsEvicted uint
expectedPodsWithMetricsEvicted uint
evictedPods []string
evictableNamespaces *api.Namespaces
}{
{
name: "no evictable pods",
@@ -106,7 +110,20 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 0,
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 2401, 1714978816),
test.BuildNodeMetrics(n2NodeName, 401, 1714978816),
test.BuildNodeMetrics(n3NodeName, 10, 1714978816),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 0,
expectedPodsWithMetricsEvicted: 0,
},
{
name: "without priorities",
@@ -156,7 +173,20 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 4,
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 4,
expectedPodsWithMetricsEvicted: 4,
},
{
name: "without priorities, but excluding namespaces",
@@ -175,18 +205,23 @@ func TestLowNodeUtilization(t *testing.T) {
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
pod.Namespace = "namespace1"
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
pod.Namespace = "namespace1"
}),
test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
pod.Namespace = "namespace1"
}),
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
pod.Namespace = "namespace1"
}),
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
pod.Namespace = "namespace1"
}),
// These won't be evicted.
@@ -216,12 +251,25 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
evictableNamespaces: &api.Namespaces{
Exclude: []string{
"namespace1",
},
},
expectedPodsEvicted: 0,
expectedPodsEvicted: 0,
expectedPodsWithMetricsEvicted: 0,
},
{
name: "without priorities, but include only default namespace",
@@ -242,12 +290,15 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
// TODO(zhifei92): add ownerRef for pod
pod.Namespace = "namespace3"
}),
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
// TODO(zhifei92): add ownerRef for pod
pod.Namespace = "namespace4"
}),
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
// TODO(zhifei92): add ownerRef for pod
pod.Namespace = "namespace5"
}),
// These won't be evicted.
@@ -271,18 +322,32 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
evictableNamespaces: &api.Namespaces{
Include: []string{
"default",
},
},
expectedPodsEvicted: 2,
expectedPodsEvicted: 2,
expectedPodsWithMetricsEvicted: 2,
},
{
name: "without priorities stop when cpu capacity is depleted",
|
||||
@@ -300,14 +365,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 300, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 300, n1NodeName, func(pod *v1.Pod) {
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
@@ -324,16 +389,29 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 300, n1NodeName, func(pod *v1.Pod) {
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 2100, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before cpu is depleted
|
||||
expectedPodsEvicted: 3,
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 0, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
expectedPodsWithMetricsEvicted: 4,
|
||||
},
|
||||
{
|
||||
name: "with priorities",
|
||||
@@ -396,13 +474,27 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
expectedPodsWithMetricsEvicted: 4,
|
||||
},
|
||||
{
|
||||
name: "without priorities evicting best-effort pods only",
|
||||
@@ -419,7 +511,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
|
||||
// All pods are assumed to be burstable (tc.BuildTestNode always sets both cpu/memory resource requests to some value)
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
@@ -463,14 +555,28 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
evictedPods: []string{"p1", "p2", "p4", "p5"},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
expectedPodsWithMetricsEvicted: 4,
|
||||
evictedPods: []string{"p1", "p2", "p4", "p5"},
|
||||
},
|
||||
{
|
||||
name: "with extended resource",
|
||||
@@ -538,6 +644,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
@@ -548,8 +655,21 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before extended resource is depleted
|
||||
expectedPodsEvicted: 3,
|
||||
expectedPodsEvicted: 3,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "with extended resource in some of nodes",
|
||||
@@ -576,8 +696,21 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
// 0 pods available for eviction because there's no enough extended resource in node2
|
||||
expectedPodsEvicted: 0,
|
||||
expectedPodsEvicted: 0,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but only other node is unschedulable",
|
||||
@@ -620,12 +753,25 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but only other node doesn't match pod node selector for p4 and p5",
|
||||
@@ -684,12 +830,23 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
expectedPodsWithMetricsEvicted: 3,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but only other node doesn't match pod node affinity for p4 and p5",
|
||||
@@ -776,13 +933,24 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
expectedPodsWithMetricsEvicted: 3,
|
||||
},
|
||||
{
|
||||
name: "deviation thresholds",
|
||||
@@ -827,124 +995,226 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 2,
|
||||
evictedPods: []string{},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 2,
|
||||
expectedPodsWithMetricsEvicted: 2,
|
||||
evictedPods: []string{},
|
||||
},
|
||||
{
|
||||
name: "without priorities different evictions for requested and actual resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeSelectorKey: notMatchingNodeSelectorValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with affinity to run in the "west" datacenter upon scheduling
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeSelectorKey,
|
||||
Operator: "In",
|
||||
Values: []string{nodeSelectorValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with affinity to run in the "west" datacenter upon scheduling
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeSelectorKey,
|
||||
Operator: "In",
|
||||
Values: []string{nodeSelectorValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 801, 0),
|
||||
test.BuildPodMetrics("p2", 801, 0),
|
||||
test.BuildPodMetrics("p3", 801, 0),
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
expectedPodsWithMetricsEvicted: 2,
|
||||
},
|
||||
}
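The last case above is the one where request-based and metrics-based balancing diverge: by requests, n1 carries eight 400m pods (80% of 4000m), while the fake metrics server reports 3201m for the node and 801m per pod. A back-of-the-envelope check, assuming eviction stops once usage is no longer above the 50% target threshold (a sketch, not descheduler internals):

package main

import "fmt"

func main() {
    const nodeCPU = 4000.0 // millicores, from BuildTestNode(n1NodeName, 4000, ...)
    // Requested: eight 400m pods keep n1 above target until three are gone.
    fmt.Println((8*400 - 3*400) / nodeCPU * 100) // 50 -> at target after 3 evictions
    // Actual (metrics): n1 reports 3201m; each evicted pod frees 801m.
    fmt.Println((3201 - 2*801) / nodeCPU * 100) // 39.975 -> below target after 2
}

That is why the case expects expectedPodsEvicted: 3 but expectedPodsWithMetricsEvicted: 2.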

    for _, test := range testCases {
        t.Run(test.name, func(t *testing.T) {
            ctx, cancel := context.WithCancel(context.Background())
            defer cancel()
    for _, tc := range testCases {
        testFnc := func(metricsEnabled bool, expectedPodsEvicted uint) func(t *testing.T) {
            return func(t *testing.T) {
                ctx, cancel := context.WithCancel(context.Background())
                defer cancel()

            var objs []runtime.Object
            for _, node := range test.nodes {
                objs = append(objs, node)
            }
            for _, pod := range test.pods {
                objs = append(objs, pod)
            }
            fakeClient := fake.NewSimpleClientset(objs...)
                var objs []runtime.Object
                for _, node := range tc.nodes {
                    objs = append(objs, node)
                }
                for _, pod := range tc.pods {
                    objs = append(objs, pod)
                }

            sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
            podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
                fakeClient := fake.NewSimpleClientset(objs...)

            getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
            if err != nil {
                t.Errorf("Build get pods assigned to node function error: %v", err)
            }

            podsForEviction := make(map[string]struct{})
            for _, pod := range test.evictedPods {
                podsForEviction[pod] = struct{}{}
            }

            evictionFailed := false
            if len(test.evictedPods) > 0 {
                fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
                    getAction := action.(core.CreateAction)
                    obj := getAction.GetObject()
                    if eviction, ok := obj.(*policy.Eviction); ok {
                        if _, exists := podsForEviction[eviction.Name]; exists {
                            return true, obj, nil
                        }
                        evictionFailed = true
                        return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
                var collector *metricscollector.MetricsCollector
                if metricsEnabled {
                    metricsClientset := fakemetricsclient.NewSimpleClientset()
                    for _, nodemetrics := range tc.nodemetricses {
                        metricsClientset.Tracker().Create(nodesgvr, nodemetrics, "")
                    }
                    for _, podmetrics := range tc.podmetricses {
                        metricsClientset.Tracker().Create(podsgvr, podmetrics, podmetrics.Namespace)
                    }
                    return true, obj, nil
                })
            }

            sharedInformerFactory.Start(ctx.Done())
            sharedInformerFactory.WaitForCacheSync(ctx.Done())
                    sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
                    nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
                    sharedInformerFactory.Start(ctx.Done())
                    sharedInformerFactory.WaitForCacheSync(ctx.Done())

            eventRecorder := &events.FakeRecorder{}
                    collector = metricscollector.NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
                    err := collector.Collect(ctx)
                    if err != nil {
                        t.Fatalf("unable to collect metrics: %v", err)
                    }
                }

            podEvictor := evictions.NewPodEvictor(
                fakeClient,
                policy.SchemeGroupVersion.String(),
                false,
                nil,
                nil,
                test.nodes,
                false,
                eventRecorder,
            )
                podsForEviction := make(map[string]struct{})
                for _, pod := range tc.evictedPods {
                    podsForEviction[pod] = struct{}{}
                }

            defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
                EvictLocalStoragePods:   false,
                EvictSystemCriticalPods: false,
                IgnorePvcPods:           false,
                EvictFailedBarePods:     false,
                NodeFit:                 true,
            }
                evictionFailed := false
                if len(tc.evictedPods) > 0 {
                    fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
                        getAction := action.(core.CreateAction)
                        obj := getAction.GetObject()
                        if eviction, ok := obj.(*policy.Eviction); ok {
                            if _, exists := podsForEviction[eviction.Name]; exists {
                                return true, obj, nil
                            }
                            evictionFailed = true
                            return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
                        }
                        return true, obj, nil
                    })
                }

            evictorFilter, err := defaultevictor.New(
                defaultEvictorFilterArgs,
                &frameworkfake.HandleImpl{
                    ClientsetImpl:                 fakeClient,
                    GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
                    SharedInformerFactoryImpl:     sharedInformerFactory,
                handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
                if err != nil {
                    t.Fatalf("Unable to initialize a framework handle: %v", err)
                }
                handle.MetricsCollectorImpl = collector

                plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
                    Thresholds:             tc.thresholds,
                    TargetThresholds:       tc.targetThresholds,
                    UseDeviationThresholds: tc.useDeviationThresholds,
                    EvictableNamespaces:    tc.evictableNamespaces,
                    MetricsUtilization: MetricsUtilization{
                        MetricsServer: metricsEnabled,
                    },
                },
            )
            if err != nil {
                t.Fatalf("Unable to initialize the plugin: %v", err)
            }
                    handle)
                if err != nil {
                    t.Fatalf("Unable to initialize the plugin: %v", err)
                }
                plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)

            handle := &frameworkfake.HandleImpl{
                ClientsetImpl:                 fakeClient,
                GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
                PodEvictorImpl:                podEvictor,
                EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
                SharedInformerFactoryImpl:     sharedInformerFactory,
                podsEvicted := podEvictor.TotalEvicted()
                if expectedPodsEvicted != podsEvicted {
                    t.Errorf("Expected %v pods to be evicted but %v got evicted", expectedPodsEvicted, podsEvicted)
                }
                if evictionFailed {
                    t.Errorf("Pod evictions failed unexpectedly")
                }
            }

            plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
                Thresholds:             test.thresholds,
                TargetThresholds:       test.targetThresholds,
                UseDeviationThresholds: test.useDeviationThresholds,
                EvictableNamespaces:    test.evictableNamespaces,
            },
                handle)
            if err != nil {
                t.Fatalf("Unable to initialize the plugin: %v", err)
            }
            plugin.(frameworktypes.BalancePlugin).Balance(ctx, test.nodes)

            podsEvicted := podEvictor.TotalEvicted()
            if test.expectedPodsEvicted != podsEvicted {
                t.Errorf("Expected %v pods to be evicted but %v got evicted", test.expectedPodsEvicted, podsEvicted)
            }
            if evictionFailed {
                t.Errorf("Pod evictions failed unexpectedly")
            }
        })
    }
        t.Run(tc.name, testFnc(false, tc.expectedPodsEvicted))
        t.Run(tc.name+" with metrics enabled", testFnc(true, tc.expectedPodsWithMetricsEvicted))
    }
}

@@ -971,11 +1241,14 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
        },
    }

    var uint0, uint1 uint = 0, 1
    tests := []struct {
        name              string
        nodes             []*v1.Node
        pods              []*v1.Pod
        evictionsExpected uint
        name                  string
        nodes                 []*v1.Node
        pods                  []*v1.Pod
        maxPodsToEvictPerNode *uint
        maxPodsToEvictTotal   *uint
        evictionsExpected     uint
    }{
        {
            name: "No taints",
@@ -1031,6 +1304,26 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
            },
            evictionsExpected: 1,
        },
        {
            name:  "Pod which tolerates node taint, set maxPodsToEvictTotal(0), should not be expelled",
            nodes: []*v1.Node{n1, n3withTaints},
            pods: []*v1.Pod{
                // Node 1 pods
                test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
                test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
                test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
                test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
                test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
                test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
                test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
                podThatToleratesTaint,
                // Node 3 pods
                test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
            },
            maxPodsToEvictPerNode: &uint1,
            maxPodsToEvictTotal:   &uint0,
            evictionsExpected:     0,
        },
    }
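The new maxPodsToEvictTotal knob caps evictions across the whole run, independently of the per-node cap: with a total budget of 0, even a node whose per-node budget would allow one eviction evicts nothing. A minimal wiring sketch; WithMaxPodsToEvictPerNode appears in this diff, while WithMaxPodsToEvictTotal is assumed by analogy and should be verified against the evictions package:

package main

import (
    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

func main() {
    perNode, total := uint(1), uint(0)
    // WithMaxPodsToEvictTotal is an assumed option name, mirroring
    // WithMaxPodsToEvictPerNode used further down in this diff.
    opts := evictions.NewOptions().
        WithMaxPodsToEvictPerNode(&perNode).
        WithMaxPodsToEvictTotal(&total)
    _ = opts
    // With the total budget at 0 every eviction attempt is rejected with
    // *evictions.EvictionTotalLimitError, so evictionsExpected is 0.
}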

    for _, item := range tests {
@@ -1045,56 +1338,16 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
            }

            fakeClient := fake.NewSimpleClientset(objs...)
            sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
            podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

            getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
            if err != nil {
                t.Errorf("Build get pods assigned to node function error: %v", err)
            }

            sharedInformerFactory.Start(ctx.Done())
            sharedInformerFactory.WaitForCacheSync(ctx.Done())

            eventRecorder := &events.FakeRecorder{}

            podEvictor := evictions.NewPodEvictor(
            handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
                ctx,
                fakeClient,
                policy.SchemeGroupVersion.String(),
                false,
                &item.evictionsExpected,
                evictions.NewOptions().WithMaxPodsToEvictPerNode(&item.evictionsExpected),
                defaultevictor.DefaultEvictorArgs{NodeFit: true},
                nil,
                item.nodes,
                false,
                eventRecorder,
            )

            defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
                EvictLocalStoragePods:   false,
                EvictSystemCriticalPods: false,
                IgnorePvcPods:           false,
                EvictFailedBarePods:     false,
                NodeFit:                 true,
            }

            evictorFilter, err := defaultevictor.New(
                defaultEvictorFilterArgs,
                &frameworkfake.HandleImpl{
                    ClientsetImpl:                 fakeClient,
                    GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
                    SharedInformerFactoryImpl:     sharedInformerFactory,
                },
            )
            if err != nil {
                t.Fatalf("Unable to initialize the plugin: %v", err)
            }

            handle := &frameworkfake.HandleImpl{
                ClientsetImpl:                 fakeClient,
                GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
                PodEvictorImpl:                podEvictor,
                EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
                SharedInformerFactoryImpl:     sharedInformerFactory,
                t.Fatalf("Unable to initialize a framework handle: %v", err)
            }

            plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{

@@ -28,7 +28,6 @@ import (
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/klog/v2"
    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
    "sigs.k8s.io/descheduler/pkg/descheduler/node"
    nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
@@ -78,14 +77,14 @@ func getNodeThresholds(
    nodes []*v1.Node,
    lowThreshold, highThreshold api.ResourceThresholds,
    resourceNames []v1.ResourceName,
    getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
    useDeviationThresholds bool,
    usageClient usageClient,
) map[string]NodeThresholds {
    nodeThresholdsMap := map[string]NodeThresholds{}

    averageResourceUsagePercent := api.ResourceThresholds{}
    if useDeviationThresholds {
        averageResourceUsagePercent = averageNodeBasicresources(nodes, getPodsAssignedToNode, resourceNames)
        averageResourceUsagePercent = averageNodeBasicresources(nodes, usageClient)
    }

    for _, node := range nodes {
@@ -121,22 +120,15 @@ func getNodeThresholds(

func getNodeUsage(
    nodes []*v1.Node,
    resourceNames []v1.ResourceName,
    getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
    usageClient usageClient,
) []NodeUsage {
    var nodeUsageList []NodeUsage

    for _, node := range nodes {
        pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, nil)
        if err != nil {
            klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
            continue
        }

        nodeUsageList = append(nodeUsageList, NodeUsage{
            node:    node,
            usage:   nodeutil.NodeUtilization(pods, resourceNames),
            allPods: pods,
            usage:   usageClient.nodeUtilization(node.Name),
            allPods: usageClient.pods(node.Name),
        })
    }

@@ -214,6 +206,26 @@ func classifyNodes(
    return lowNodes, highNodes
}

func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interface{} {
    // log message in one line
    keysAndValues := []interface{}{}
    if quantity, exists := usage[v1.ResourceCPU]; exists {
        keysAndValues = append(keysAndValues, "CPU", quantity.MilliValue())
    }
    if quantity, exists := usage[v1.ResourceMemory]; exists {
        keysAndValues = append(keysAndValues, "Mem", quantity.Value())
    }
    if quantity, exists := usage[v1.ResourcePods]; exists {
        keysAndValues = append(keysAndValues, "Pods", quantity.Value())
    }
    for name := range usage {
        if !nodeutil.IsBasicResource(name) {
            keysAndValues = append(keysAndValues, string(name), usage[name].Value())
        }
    }
    return keysAndValues
}
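For readers skimming the diff: this new helper flattens a usage map into klog's alternating key/value form, with CPU reported in millicores, memory and pods in raw units, and extended resources appended in map-iteration order. A minimal sketch of what a call would produce (the sample quantities are invented, echoing testNode1 further down):

package main

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // Sample quantities for illustration only.
    usage := map[v1.ResourceName]*resource.Quantity{
        v1.ResourceCPU:    resource.NewMilliQuantity(1730, resource.DecimalSI),
        v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
        v1.ResourcePods:   resource.NewQuantity(25, resource.DecimalSI),
        "example.com/foo":  resource.NewQuantity(2, resource.DecimalSI),
    }
    // usageToKeysAndValues(usage) would yield:
    // ["CPU", 1730, "Mem", 3038982964, "Pods", 25, "example.com/foo", 2]
    _ = usage
}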

// evictPodsFromSourceNodes evicts pods based on priority when every pod on the node
// has a priority set; otherwise it falls back to evicting based on QoS class.
// TODO: @ravig Break this function into smaller functions.
@@ -222,15 +234,16 @@ func evictPodsFromSourceNodes(
    evictableNamespaces *api.Namespaces,
    sourceNodes, destinationNodes []NodeInfo,
    podEvictor frameworktypes.Evictor,
    evictOptions evictions.EvictOptions,
    podFilter func(pod *v1.Pod) bool,
    resourceNames []v1.ResourceName,
    continueEviction continueEvictionCond,
    usageClient usageClient,
) {
    // upper bound on total number of pods/cpu/memory and optional extended resources to be moved
    totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{
        v1.ResourcePods:   {},
        v1.ResourceCPU:    {},
        v1.ResourceMemory: {},
    totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{}
    for _, resourceName := range resourceNames {
        totalAvailableUsage[resourceName] = &resource.Quantity{}
    }

    taintsOfDestinationNodes := make(map[string][]v1.Taint, len(destinationNodes))
@@ -238,6 +251,10 @@ func evictPodsFromSourceNodes(
        taintsOfDestinationNodes[node.node.Name] = node.node.Spec.Taints

        for _, name := range resourceNames {
            if _, exists := node.usage[name]; !exists {
                klog.Errorf("unable to find %q resource in node's %q usage, terminating eviction", name, node.node.Name)
                return
            }
            if _, ok := totalAvailableUsage[name]; !ok {
                totalAvailableUsage[name] = resource.NewQuantity(0, resource.DecimalSI)
            }
@@ -247,17 +264,7 @@ func evictPodsFromSourceNodes(
    }

    // log message in one line
    keysAndValues := []interface{}{
        "CPU", totalAvailableUsage[v1.ResourceCPU].MilliValue(),
        "Mem", totalAvailableUsage[v1.ResourceMemory].Value(),
        "Pods", totalAvailableUsage[v1.ResourcePods].Value(),
    }
    for name := range totalAvailableUsage {
        if !node.IsBasicResource(name) {
            keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
        }
    }
    klog.V(1).InfoS("Total capacity to be moved", keysAndValues...)
    klog.V(1).InfoS("Total capacity to be moved", usageToKeysAndValues(totalAvailableUsage)...)

    for _, node := range sourceNodes {
        klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)
@@ -273,8 +280,14 @@ func evictPodsFromSourceNodes(
        klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
        // sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
        podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
        evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, continueEviction)

        err := evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, evictOptions, continueEviction, usageClient)
        if err != nil {
            switch err.(type) {
            case *evictions.EvictionTotalLimitError:
                return
            default:
            }
        }
    }
}
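The sort step above is what makes the eviction order deterministic: lowest-priority pods go first, and ties are broken by QoS class. A rough, self-contained sketch of that comparison; qosRank and sortPodsLowToHigh are illustrative helpers, not the descheduler's actual internals (SortPodsBasedOnPriorityLowToHigh is the real entry point named in the diff):

package utilsketch

import (
    "sort"

    v1 "k8s.io/api/core/v1"
)

// qosRank orders QoS classes for eviction: BestEffort first, Guaranteed last.
func qosRank(pod *v1.Pod) int {
    switch pod.Status.QOSClass {
    case v1.PodQOSBestEffort:
        return 0
    case v1.PodQOSBurstable:
        return 1
    default: // Guaranteed
        return 2
    }
}

// sortPodsLowToHigh sorts by priority ascending, with QoS as the tie-breaker.
func sortPodsLowToHigh(pods []*v1.Pod) {
    sort.Slice(pods, func(i, j int) bool {
        var pi, pj int32
        if pods[i].Spec.Priority != nil {
            pi = *pods[i].Spec.Priority
        }
        if pods[j].Spec.Priority != nil {
            pj = *pods[j].Spec.Priority
        }
        if pi != pj {
            return pi < pj // lower priority evicted first
        }
        return qosRank(pods[i]) < qosRank(pods[j])
    })
}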

@@ -286,8 +299,10 @@ func evictPods(
    totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
    taintsOfLowNodes map[string][]v1.Taint,
    podEvictor frameworktypes.Evictor,
    evictOptions evictions.EvictOptions,
    continueEviction continueEvictionCond,
) {
    usageClient usageClient,
) error {
    var excludedNamespaces sets.Set[string]
    if evictableNamespaces != nil {
        excludedNamespaces = sets.New(evictableNamespaces.Exclude...)
@@ -309,58 +324,67 @@ func evictPods(
            continue
        }

        if preEvictionFilterWithOptions(pod) {
            if podEvictor.Evict(ctx, pod, evictions.EvictOptions{}) {
                klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
        if !preEvictionFilterWithOptions(pod) {
            continue
        }
        podUsage, err := usageClient.podUsage(pod)
        if err != nil {
            klog.Errorf("unable to get pod usage for %v/%v: %v", pod.Namespace, pod.Name, err)
            continue
        }
        err = podEvictor.Evict(ctx, pod, evictOptions)
        if err == nil {
            klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))

                for name := range totalAvailableUsage {
                    if name == v1.ResourcePods {
                        nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
                        totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
                    } else {
                        quantity := utils.GetResourceRequestQuantity(pod, name)
                        nodeInfo.usage[name].Sub(quantity)
                        totalAvailableUsage[name].Sub(quantity)
                    }
                }

                keysAndValues := []interface{}{
                    "node", nodeInfo.node.Name,
                    "CPU", nodeInfo.usage[v1.ResourceCPU].MilliValue(),
                    "Mem", nodeInfo.usage[v1.ResourceMemory].Value(),
                    "Pods", nodeInfo.usage[v1.ResourcePods].Value(),
                }
                for name := range totalAvailableUsage {
                    if !nodeutil.IsBasicResource(name) {
                        keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
                    }
                }

                klog.V(3).InfoS("Updated node usage", keysAndValues...)
                // check if pods can be still evicted
                if !continueEviction(nodeInfo, totalAvailableUsage) {
                    break
            for name := range totalAvailableUsage {
                if name == v1.ResourcePods {
                    nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
                    totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
                } else {
                    nodeInfo.usage[name].Sub(*podUsage[name])
                    totalAvailableUsage[name].Sub(*podUsage[name])
                }
            }

            keysAndValues := []interface{}{
                "node", nodeInfo.node.Name,
            }
            keysAndValues = append(keysAndValues, usageToKeysAndValues(nodeInfo.usage)...)
            klog.V(3).InfoS("Updated node usage", keysAndValues...)
            // check if pods can be still evicted
            if !continueEviction(nodeInfo, totalAvailableUsage) {
                break
            }
            continue
        }
        if podEvictor.NodeLimitExceeded(nodeInfo.node) {
            return
        switch err.(type) {
        case *evictions.EvictionNodeLimitError, *evictions.EvictionTotalLimitError:
            return err
        default:
            klog.Errorf("eviction failed: %v", err)
        }
    }
    return nil
}
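The typed errors now drive the control flow: a total-limit error aborts the entire balancing pass, a node-limit error stops work on the current node, and anything else is logged while the loop moves to the next pod. A condensed sketch of that classification, assuming only the two error types from sigs.k8s.io/descheduler/pkg/descheduler/evictions shown above:

package sketch

import (
    "k8s.io/klog/v2"

    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

// handleEvictErr mirrors the switch above: limit errors propagate so the
// caller can stop; anything else is logged and the loop continues.
func handleEvictErr(err error) (stop bool) {
    switch err.(type) {
    case *evictions.EvictionTotalLimitError, *evictions.EvictionNodeLimitError:
        return true
    default:
        klog.Errorf("eviction failed: %v", err)
        return false
    }
}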

// sortNodesByUsage sorts nodes based on usage according to the given plugin.
func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
    sort.Slice(nodes, func(i, j int) bool {
        ti := nodes[i].usage[v1.ResourceMemory].Value() + nodes[i].usage[v1.ResourceCPU].MilliValue() + nodes[i].usage[v1.ResourcePods].Value()
        tj := nodes[j].usage[v1.ResourceMemory].Value() + nodes[j].usage[v1.ResourceCPU].MilliValue() + nodes[j].usage[v1.ResourcePods].Value()

        // extended resources
        for name := range nodes[i].usage {
            if !nodeutil.IsBasicResource(name) {
                ti = ti + nodes[i].usage[name].Value()
                tj = tj + nodes[j].usage[name].Value()
        ti := resource.NewQuantity(0, resource.DecimalSI).Value()
        tj := resource.NewQuantity(0, resource.DecimalSI).Value()
        for resourceName := range nodes[i].usage {
            if resourceName == v1.ResourceCPU {
                ti += nodes[i].usage[resourceName].MilliValue()
            } else {
                ti += nodes[i].usage[resourceName].Value()
            }
        }
        for resourceName := range nodes[j].usage {
            if resourceName == v1.ResourceCPU {
                tj += nodes[j].usage[resourceName].MilliValue()
            } else {
                tj += nodes[j].usage[resourceName].Value()
            }
        }
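Either variant of the comparator folds every tracked resource into a single int64 sort key: CPU in millicores, everything else in raw units, all summed. A worked check against the usage values of testNode1/testNode2/testNode3 defined later in this diff:

package main

import "fmt"

// scalarKey mimics the comparator's fold: CPU in millicores, other
// resources in raw units, summed into one int64.
func scalarKey(cpuMilli, memBytes, pods int64) int64 {
    return cpuMilli + memBytes + pods
}

func main() {
    fmt.Println(scalarKey(1730, 3038982964, 25)) // node1: 3038984719
    fmt.Println(scalarKey(1220, 3038982964, 11)) // node2: 3038984195
    fmt.Println(scalarKey(1530, 5038982964, 20)) // node3: 5038984514
    // Descending order is node3, node1, node2 — matching the test expectations below.
}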

@@ -422,17 +446,12 @@ func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*
    return nonRemovablePods, removablePods
}

func averageNodeBasicresources(nodes []*v1.Node, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, resourceNames []v1.ResourceName) api.ResourceThresholds {
func averageNodeBasicresources(nodes []*v1.Node, usageClient usageClient) api.ResourceThresholds {
    total := api.ResourceThresholds{}
    average := api.ResourceThresholds{}
    numberOfNodes := len(nodes)
    for _, node := range nodes {
        pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, nil)
        if err != nil {
            numberOfNodes--
            continue
        }
        usage := nodeutil.NodeUtilization(pods, resourceNames)
        usage := usageClient.nodeUtilization(node.Name)
        nodeCapacity := node.Status.Capacity
        if len(node.Status.Allocatable) > 0 {
            nodeCapacity = node.Status.Allocatable
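The truncated remainder of this function converts each node's usage into a percentage of capacity (allocatable, when set) before averaging, since descheduler's ResourceThresholds values are percentages. A minimal sketch of that conversion; percentOf is a hypothetical helper, not a descheduler API:

package main

import "fmt"

// percentOf is a hypothetical helper showing the usage-to-threshold math:
// usage expressed as a percentage of node capacity.
func percentOf(usage, capacity int64) float64 {
    if capacity == 0 {
        return 0
    }
    return float64(usage) / float64(capacity) * 100
}

func main() {
    // e.g. 1730 used millicores on a 1930m-allocatable node ≈ 89.6%.
    fmt.Printf("%.1f%%\n", percentOf(1730, 1930))
}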

@@ -25,82 +25,34 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
    nodeInfo := &NodeInfo{
        NodeUsage: NodeUsage{
            node: &v1.Node{
                Status: v1.NodeStatus{
                    Capacity: v1.ResourceList{
                        v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
                        v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
                    },
                    Allocatable: v1.ResourceList{
                        v1.ResourceCPU:    *resource.NewMilliQuantity(1930, resource.DecimalSI),
                        v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
                    },
                },
                ObjectMeta: metav1.ObjectMeta{Name: name},
            },
        },
    }
    apply(nodeInfo)
    return nodeInfo
}

var (
    lowPriority      = int32(0)
    highPriority     = int32(10000)
    extendedResource = v1.ResourceName("example.com/foo")
    testNode1        = NodeInfo{
        NodeUsage: NodeUsage{
            node: &v1.Node{
                Status: v1.NodeStatus{
                    Capacity: v1.ResourceList{
                        v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
                        v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
                    },
                    Allocatable: v1.ResourceList{
                        v1.ResourceCPU:    *resource.NewMilliQuantity(1930, resource.DecimalSI),
                        v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
                    },
                },
                ObjectMeta: metav1.ObjectMeta{Name: "node1"},
            },
            usage: map[v1.ResourceName]*resource.Quantity{
                v1.ResourceCPU:    resource.NewMilliQuantity(1730, resource.DecimalSI),
                v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
                v1.ResourcePods:   resource.NewQuantity(25, resource.BinarySI),
            },
        },
    }
    testNode2 = NodeInfo{
        NodeUsage: NodeUsage{
            node: &v1.Node{
                Status: v1.NodeStatus{
                    Capacity: v1.ResourceList{
                        v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
                        v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
                    },
                    Allocatable: v1.ResourceList{
                        v1.ResourceCPU:    *resource.NewMilliQuantity(1930, resource.DecimalSI),
                        v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
                    },
                },
                ObjectMeta: metav1.ObjectMeta{Name: "node2"},
            },
            usage: map[v1.ResourceName]*resource.Quantity{
                v1.ResourceCPU:    resource.NewMilliQuantity(1220, resource.DecimalSI),
                v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
                v1.ResourcePods:   resource.NewQuantity(11, resource.BinarySI),
            },
        },
    }
    testNode3 = NodeInfo{
        NodeUsage: NodeUsage{
            node: &v1.Node{
                Status: v1.NodeStatus{
                    Capacity: v1.ResourceList{
                        v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
                        v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
                    },
                    Allocatable: v1.ResourceList{
                        v1.ResourceCPU:    *resource.NewMilliQuantity(1930, resource.DecimalSI),
                        v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
                        v1.ResourcePods:   *resource.NewQuantity(29, resource.BinarySI),
                    },
                },
                ObjectMeta: metav1.ObjectMeta{Name: "node3"},
            },
            usage: map[v1.ResourceName]*resource.Quantity{
                v1.ResourceCPU:    resource.NewMilliQuantity(1530, resource.DecimalSI),
                v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
                v1.ResourcePods:   resource.NewQuantity(20, resource.BinarySI),
            },
        },
    }
)

func TestResourceUsagePercentages(t *testing.T) {
@@ -141,26 +93,81 @@ func TestResourceUsagePercentages(t *testing.T) {
    t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
}

func TestSortNodesByUsageDescendingOrder(t *testing.T) {
    nodeList := []NodeInfo{testNode1, testNode2, testNode3}
    expectedNodeList := []NodeInfo{testNode3, testNode1, testNode2} // testNode3 has the highest usage
    sortNodesByUsage(nodeList, false) // ascending=false, sort nodes in descending order
func TestSortNodesByUsage(t *testing.T) {
    tests := []struct {
        name                  string
        nodeInfoList          []NodeInfo
        expectedNodeInfoNames []string
    }{
        {
            name: "cpu memory pods",
            nodeInfoList: []NodeInfo{
                *BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
                    nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
                        v1.ResourceCPU:    resource.NewMilliQuantity(1730, resource.DecimalSI),
                        v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
                        v1.ResourcePods:   resource.NewQuantity(25, resource.BinarySI),
                    }
                }),
                *BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
                    nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
                        v1.ResourceCPU:    resource.NewMilliQuantity(1220, resource.DecimalSI),
                        v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
                        v1.ResourcePods:   resource.NewQuantity(11, resource.BinarySI),
                    }
                }),
                *BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
                    nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
                        v1.ResourceCPU:    resource.NewMilliQuantity(1530, resource.DecimalSI),
                        v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
                        v1.ResourcePods:   resource.NewQuantity(20, resource.BinarySI),
                    }
                }),
            },
            expectedNodeInfoNames: []string{"node3", "node1", "node2"},
        },
        {
            name: "memory",
            nodeInfoList: []NodeInfo{
                *BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
                    nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
                        v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
                    }
                }),
                *BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
                    nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
                        v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
                    }
                }),
                *BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
                    nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
                        v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
                    }
                }),
            },
            expectedNodeInfoNames: []string{"node3", "node1", "node2"},
        },
    }

    for i := 0; i < len(expectedNodeList); i++ {
        if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
            t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
        }
    }
}

func TestSortNodesByUsageAscendingOrder(t *testing.T) {
    nodeList := []NodeInfo{testNode1, testNode2, testNode3}
    expectedNodeList := []NodeInfo{testNode2, testNode1, testNode3}
    sortNodesByUsage(nodeList, true) // ascending=true, sort nodes in ascending order

    for i := 0; i < len(expectedNodeList); i++ {
        if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
            t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
        }
    for _, tc := range tests {
        t.Run(tc.name+" descending", func(t *testing.T) {
            sortNodesByUsage(tc.nodeInfoList, false) // ascending=false, sort nodes in descending order

            for i := 0; i < len(tc.nodeInfoList); i++ {
                if tc.nodeInfoList[i].NodeUsage.node.Name != tc.expectedNodeInfoNames[i] {
                    t.Errorf("Expected %v, got %v", tc.expectedNodeInfoNames[i], tc.nodeInfoList[i].NodeUsage.node.Name)
                }
            }
        })
        t.Run(tc.name+" ascending", func(t *testing.T) {
            sortNodesByUsage(tc.nodeInfoList, true) // ascending=true, sort nodes in ascending order

            size := len(tc.nodeInfoList)
            for i := 0; i < size; i++ {
                if tc.nodeInfoList[i].NodeUsage.node.Name != tc.expectedNodeInfoNames[size-i-1] {
                    t.Errorf("Expected %v, got %v", tc.expectedNodeInfoNames[size-i-1], tc.nodeInfoList[i].NodeUsage.node.Name)
                }
            }
        })
    }
}