mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Compare commits


222 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
f68d06ad4a Merge pull request #1600 from a7i/automated-cherry-pick-of-#1591-upstream-release-1.32
Automated cherry pick of #1591: bump x/net and x/crypto deps for CVE-2024-45337 and CVE-2024-45338
2025-01-06 21:42:30 +01:00
Amir Alavi
0b68495a17 bump x/net and x/crypto deps for CVE-2024-45337 and CVE-2024-45338
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2025-01-06 08:52:05 -06:00
Kubernetes Prow Robot
8e6be70ff9 Merge pull request #1592 from a7i/chart-image
[release v0.32.0] bump chart and images
2025-01-03 00:26:13 +01:00
Kubernetes Prow Robot
d536cf8ed0 Merge pull request #1593 from a7i/license-2025
update license to year 2025
2025-01-02 22:54:15 +01:00
Amir Alavi
48aede9fde update license to year 2025
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2025-01-02 13:36:59 -05:00
Amir Alavi
bd5b95dbf9 [release v0.32.0] bump chart and images
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2025-01-02 13:31:18 -05:00
Kubernetes Prow Robot
71726c8c85 Merge pull request #1588 from a7i/docs-1.32
[release v0.32] update docs/readme
2024-12-30 06:48:13 +01:00
Amir Alavi
32e29973d8 [release v0.32] update docs/readme
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-12-29 23:19:16 -06:00
Kubernetes Prow Robot
d0fd115747 Merge pull request #1587 from a7i/k8s-1.32
[release v0.32] update kubernetes kind version to 1.32
2024-12-29 17:42:12 +01:00
Amir Alavi
da65808f77 [release v0.32] update kubernetes kind version to 1.32
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-12-27 12:07:32 -06:00
Kubernetes Prow Robot
29ff28cbb5 Merge pull request #1536 from googs1025/test
feature(eviction): add event when EvictPod failed
2024-12-17 13:18:53 +01:00
Kubernetes Prow Robot
d653537ee6 Merge pull request #1575 from a7i/bump-k8s-1.32
bump to official kubernetes v0.32.0 deps
2024-12-17 11:17:00 +01:00
Kubernetes Prow Robot
c3b9c97827 Merge pull request #1564 from pipo02mix/improve-defaults
Improve chart default values
2024-12-17 11:16:53 +01:00
Kubernetes Prow Robot
75c5c75e13 Merge pull request #1576 from seanmalloy/bump-kind-0.26.0
Bump kind to v0.26.0
2024-12-17 10:38:52 +01:00
Sean Malloy
b66b5d35f0 Bump kind to v0.26.0
The new kind version defaults to k8s v1.32.0 when creating
new clusters.
2024-12-16 22:28:54 -06:00
Kubernetes Prow Robot
5c3a3bdcf1 Merge pull request #1573 from icloudnote/charts
Fixed the issue when successfulJobsHistoryLimit and failedJobsHistoryLimit variables are 0.
2024-12-14 19:32:42 +01:00
Kubernetes Prow Robot
46fa370ede Merge pull request #1570 from felipewnp/patch-1
docs: Removing deschedulerPolicy.strategies since it does not exist
2024-12-14 19:02:44 +01:00
Amir Alavi
4e8c7e6702 bump to official kubernetes v0.32.0 deps
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-12-13 20:03:43 -06:00
changjun
bc6323611b Fixed the issue when successfulJobsHistoryLimit and failedJobsHistoryLimit variables are 0. 2024-12-11 21:44:01 +08:00
Kubernetes Prow Robot
51a004c848 Merge pull request #1569 from a7i/bump-v0.32.0-rc.2
bump to kubernetes v0.32.0-rc.2
2024-12-11 06:48:02 +00:00
Kubernetes Prow Robot
44bde42b63 Merge pull request #1572 from seanmalloy/golangci-lint-verbose
Enable golangci-lint Verbose Output
2024-12-11 02:52:02 +00:00
googs1025
bbffb830b9 feature(eviction): add event when EvictPod failed 2024-12-07 19:38:20 +08:00
Sean Malloy
73fecfb7c4 Enable golangci-lint Verbose Output
The golangci-lint tool gets stuck for a variety of reasons when
running in Prow CI. Enable verbose output in an attempt to make
debugging easier.

ref: https://golangci-lint.run/contributing/debug/
2024-12-06 22:44:28 -06:00
Amir Alavi
f4c3fdf418 bump to kubernetes v0.32.0-rc.2
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-12-06 16:25:40 -05:00
Kubernetes Prow Robot
2c11481856 Merge pull request #1571 from seanmalloy/bump-golangci-lint-timeout
Bump golangci-lint timeout from 2m to 5m
2024-12-06 19:38:01 +00:00
Sean Malloy
e6deb65299 Bump golangci-lint timeout from 2m to 5m
Sometimes golangci-lint times out when running in CI. Bump the timeout
from 2 minutes to 5 minutes to reduce flaky CI failures.

ref: https://golangci-lint.run/usage/configuration/#run-configuration
2024-12-06 13:03:40 -06:00
felipewnp
677c6a60ce docs: Removing deschedulerPolicy.strategies since it does not exist
Since the `strategies` parameter doesn't exist anywhere in the code or docs, I'm removing it from the chart readme as a possible option.

It just makes things more confusing.
2024-12-06 12:49:43 -03:00
Kubernetes Prow Robot
a2fd3aa1eb Merge pull request #1568 from seanmalloy/bump-kind-v0.25.0
Bump kind version to v0.25.0
2024-12-06 09:34:01 +00:00
Sean Malloy
697ecc79e4 Bump kind version to v0.25.0 2024-12-05 22:32:46 -06:00
Kubernetes Prow Robot
e619ec6c41 Merge pull request #1567 from seanmalloy/golangci-lint-bump-1.62.2
Bump golangci-lint to 1.62.2
2024-12-05 14:12:01 +00:00
Sean Malloy
be9e971cda Bump golangci-lint to 1.62.2 2024-12-04 16:02:41 -06:00
Kubernetes Prow Robot
a8e14ec14d Merge pull request #1565 from seanmalloy/go-bump-1.23.3
Bump to Go 1.23.3
2024-12-04 16:45:02 +00:00
Sean Malloy
00b6e3528f Bump to Go 1.23.3
The k/k repo was bumped to Go 1.23.3. See below PR for reference.

https://github.com/kubernetes/kubernetes/pull/128852
2024-12-03 22:58:23 -06:00
pipo02mix
18e3d17c29 Improve chart default values 2024-12-03 14:42:14 +01:00
Kubernetes Prow Robot
a962cca90d Merge pull request #1555 from ingvagabund/actual-utilization-kubernetes-metrics
Use actual node resource utilization by consuming kubernetes metrics
2024-11-20 13:58:54 +00:00
Jan Chaloupka
6567f01e86 [nodeutilization]: actual usage client through kubernetes metrics 2024-11-20 14:30:46 +01:00
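For orientation, a minimal sketch of the policy surface this series adds, assuming the v1alpha2 policy API and the LowNodeUtilization balance plugin; the field names come from the README changes further below, while the profile name and threshold values are illustrative:

```
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
metricsCollector:
  enabled: true                  # top-level switch: collect actual usage via the kubernetes metrics server
profiles:
  - name: default
    pluginConfig:
      - name: "LowNodeUtilization"
        args:
          thresholds:
            "cpu": 20
            "memory": 20
          targetThresholds:
            "cpu": 50
            "memory": 50
          metricsUtilization:
            metricsServer: true  # base utilization on collected metrics instead of pod requests
    plugins:
      balance:
        enabled:
          - "LowNodeUtilization"
```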
Jan Chaloupka
c86416612e go mod tidy/vendor k8s.io/metrics 2024-11-19 16:08:14 +01:00
Kubernetes Prow Robot
a4c09bf560 Merge pull request #1466 from ingvagabund/eviction-in-background-code
Introduce RequestEviction feature for evicting pods in background (KEP-1397)
2024-11-19 14:54:54 +00:00
Jan Chaloupka
7d4ec60e2d bump(vendor) 2024-11-19 15:28:49 +01:00
Jan Chaloupka
3a1a3ff9d8 Introduce RequestEviction feature for evicting pods in background
When the feature is enabled, each pod with the descheduler.alpha.kubernetes.io/request-evict-only
annotation will have the eviction API error examined for a specific
error code/reason and message. If matched, eviction of such a pod is interpreted
as the initiation of an eviction in the background.
2024-11-19 15:28:37 +01:00
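A minimal sketch of how a pod opts into this behavior, assuming the annotation takes an empty value and the EvictionsInBackground feature gate (defined a few commits below) is enabled; the pod spec itself is illustrative:

```
apiVersion: v1
kind: Pod
metadata:
  name: background-evict-example
  annotations:
    # eviction API errors for this pod are inspected and, on match,
    # treated as the start of an eviction in the background
    descheduler.alpha.kubernetes.io/request-evict-only: ""
spec:
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9
```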
Kubernetes Prow Robot
343ebb9ff9 Merge pull request #1545 from ingvagabund/node-utilization-refactoring-III
nodeutilization: separate code responsible for requested resource extraction into a dedicated usage client
2024-11-15 14:34:53 +00:00
Jan Chaloupka
d1c64c48cd nodeutilization: separate code responsible for requested resource extraction into a dedicated usage client
Turning the usage client into an interface allows implementing other kinds
of usage clients, such as actual-usage or Prometheus-based resource
collection.
2024-11-15 11:23:49 +01:00
Kubernetes Prow Robot
7b1178be9f Merge pull request #1551 from ingvagabund/bump-golangci-lint
bump(golangci-lint)=v1.62.0
2024-11-14 15:32:51 +00:00
Kubernetes Prow Robot
23a6d26209 Merge pull request #1549 from ingvagabund/usageKeysAndValues
nodeutilization: usage2KeysAndValues for constructing a key:value list for InfoS printing resource usage
2024-11-14 14:30:52 +00:00
Jan Chaloupka
cd408dd785 bump(golangci-lint)=v1.62.0 2024-11-14 15:03:03 +01:00
Jan Chaloupka
9950b8a55d nodeutilization: usage2KeysAndValues for constructing a key:value list for InfoS printing resource usage 2024-11-14 14:15:26 +01:00
Jan Chaloupka
f115e780d8 Define EvictionsInBackground feature gate 2024-11-14 13:29:59 +01:00
Kubernetes Prow Robot
af8a7445a4 Merge pull request #1544 from ingvagabund/node-utilization-refactoring-II
nodeutilization: evictPodsFromSourceNodes: iterate through existing resources
2024-11-13 22:00:47 +00:00
Kubernetes Prow Robot
5ba11e09c7 Merge pull request #1543 from ingvagabund/node-utilization-refactoring-I
nodeutilization: NodeUtilization: make pod utilization extraction configurable
2024-11-13 21:34:47 +00:00
Kubernetes Prow Robot
d41981644a Merge pull request #1546 from ingvagabund/sortNodesByUsage-extended
sortNodesByUsage: drop extended resources as they are already counted in
2024-11-13 20:50:47 +00:00
Jan Chaloupka
67d3d52de8 sortNodesByUsage: drop extended resources as they are already counted in 2024-11-13 21:31:02 +01:00
Jan Chaloupka
e9f43856a9 nodeutilization: iterate through existing resources 2024-11-13 15:31:48 +01:00
Jan Chaloupka
e655a7eb27 nodeutilization: NodeUtilization: make pod utilization extraction configurable 2024-11-13 14:21:32 +01:00
Kubernetes Prow Robot
da52983b27 Merge pull request #1542 from ingvagabund/descheduler-server-apply
DeschedulerServer: new Apply function for applying configuration
2024-11-13 13:10:47 +00:00
Kubernetes Prow Robot
1e48cfe6f8 Merge pull request #1541 from ingvagabund/sortNodesByUsage-dont-hardcode-resource-names
Update nodes sorting function to respect available resources
2024-11-13 12:46:46 +00:00
Jan Chaloupka
fb4b8746ec Move RunE code under Run 2024-11-12 15:46:12 +01:00
Jan Chaloupka
269f16cf73 DeschedulerServer: new Apply function for applying configuration 2024-11-12 15:43:14 +01:00
Jan Chaloupka
7eeb07d96a Update nodes sorting function to respect available resources 2024-11-11 16:26:56 +01:00
Kubernetes Prow Robot
a18425a18d Merge pull request #1539 from sagar-18/patch-1
Update Dockerfile - GoLang v 1.22.7 FIX - CVE-2024-34156, CVE-2024-34155 and CVE-2024-34158
2024-11-05 07:47:29 +00:00
Sagar Chauhan
0c552b667f Update Dockerfile - GoLang v 1.22.7 FIX - CVE-2024-34156
FIX - CVE-2024-34156
2024-10-31 21:27:06 +05:30
Simon Scharf
ef0c2c1c47 add ignorePodsWithoutPDB option (#1529)
* add ignoreNonPDBPods option

* take2

* add test

* poddisruptionbudgets are now used by defaultevictor plugin

* add poddisruptionbudgets to rbac

* review comments

* don't use GetPodPodDisruptionBudgets

* review comment, don't hide error
2024-10-15 21:21:04 +01:00
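A sketch of where the new option sits in a policy, assuming the Default Evictor's v1alpha2 `pluginConfig` layout (the profile name is illustrative):

```
profiles:
  - name: default
    pluginConfig:
      - name: "DefaultEvictor"
        args:
          ignorePodsWithoutPDB: true  # skip pods that are not covered by a PodDisruptionBudget
```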
Kubernetes Prow Robot
7696f00518 Merge pull request #1532 from ingvagabund/node-utilization-refactoring
Node utilization refactoring
2024-10-14 20:10:22 +01:00
Jan Chaloupka
89bd188a35 hnu: move static code from Balance under plugin constructor 2024-10-11 16:49:23 +02:00
Jan Chaloupka
e3c41d6ea6 lnu: move static code from Balance under plugin constructor 2024-10-11 16:37:53 +02:00
Jan Chaloupka
e0ff750fa7 Move default LNU threshold setting under setDefaultForLNUThresholds 2024-10-11 16:31:37 +02:00
Kubernetes Prow Robot
b07be078c3 Merge pull request #1527 from ingvagabund/e2e-buildTestDeployment
test: construct e2e deployments through buildTestDeployment
2024-10-08 19:34:23 +01:00
Simon Scharf
22d9230a67 Make sure a dry run sees all the resources a normal run would do (#1526)
* generic resource handling, so that dry run has all the expected resource types and objects

* simpler code and better names

* fix imports
2024-10-04 12:20:28 +01:00
Jan Chaloupka
3e6166666b test: construct e2e deployments through buildTestDeployment 2024-10-01 15:23:44 +02:00
Kubernetes Prow Robot
e1e537de95 Merge pull request #1522 from fanhaouu/e2e-leaderelection
[LeaderElection] e2e: build a descheduler image and run the descheduler as a pod
2024-10-01 08:23:53 +01:00
Kubernetes Prow Robot
8e762d2585 Merge pull request #1523 from fanhaouu/e2e-topologyspreadconstraint
[TopologySpreadConstraint] e2e: build a descheduler image and run the descheduler as a pod
2024-09-30 20:37:32 +01:00
Kubernetes Prow Robot
042fef7c91 Merge pull request #1521 from fanhaouu/e2e-failedpods
[FailedPods] e2e: build a descheduler image and run the descheduler as a pod
2024-09-30 20:37:24 +01:00
Kubernetes Prow Robot
2c033a1f6d Merge pull request #1520 from fanhaouu/e2e-duplicatepods
[DuplicatePods] e2e: build a descheduler image and run the descheduler as a pod
2024-09-30 20:02:04 +01:00
Hao Fan
e0a8c77d0e e2e: DuplicatePods: build a descheduler image and run the descheduler as a pod 2024-09-23 19:37:56 +08:00
Hao Fan
05ce561a06 e2e: FailedPods: build a descheduler image and run the descheduler as a pod 2024-09-23 19:36:53 +08:00
Hao Fan
8b6a67535f remove policy_leaderelection yaml file 2024-09-23 19:36:01 +08:00
Hao Fan
347a08a11a add update lease permission 2024-09-23 19:36:01 +08:00
Hao Fan
0ac05f6ea3 e2e: LeaderElection: build a descheduler image and run the descheduler as a pod 2024-09-23 19:35:33 +08:00
Hao Fan
af495e65f7 e2e: TopologySpreadConstraint: build a descheduler image and run the descheduler as a pod 2024-09-23 19:33:59 +08:00
Kubernetes Prow Robot
18ef69584e Merge pull request #1517 from fanhaouu/e2e-common-method
[e2e] abstract common methods
2024-09-20 09:31:33 +01:00
Hao Fan
d25cba08a9 [e2e] abstract common methods 2024-09-19 21:51:11 +08:00
Kubernetes Prow Robot
8b0744c5b2 Merge pull request #1514 from a7i/amir/gha-perms
fix: github action Release Charts to have write permissions
2024-09-09 22:15:57 +01:00
Amir Alavi
6e30321989 fix: github action Release Charts to have write permissions 2024-09-09 16:56:11 -04:00
Kubernetes Prow Robot
b094acb572 Merge pull request #1512 from a7i/bump-helm
descheduler v0.31.0: bump helm chart
2024-09-09 21:48:34 +01:00
Kubernetes Prow Robot
9f15e02245 Merge pull request #1513 from a7i/amir/bump-golangci
chore: bump golangci-lint to latest
2024-09-09 20:35:09 +01:00
Amir Alavi
3bf40c830a chore: bump golangci-lint to latest 2024-09-09 14:53:15 -04:00
Kubernetes Prow Robot
c9c03ee536 Merge pull request #1511 from a7i/bump-kustomize
descheduler v0.31.0: bump kustomize files
2024-09-09 19:42:43 +01:00
Amir Alavi
f19a297d64 bump kustomize files
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-09-09 14:22:02 -04:00
Amir Alavi
2c005600cc descheduler v0.31.0: bump helm chart
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-09-09 14:20:56 -04:00
Kubernetes Prow Robot
b35e93ec7a Merge pull request #1510 from Juneezee/chore/yaml
chore: replace `github.com/ghodss/yaml` with `sigs.k8s.io/yaml`
2024-09-09 10:08:27 +01:00
Kubernetes Prow Robot
4d6a0f1c0e Merge pull request #1508 from fanhaouu/fix-run-e2e-tests-bug
[e2e] no test timeouts, produce the same image tag as in production
2024-09-09 09:22:27 +01:00
Kubernetes Prow Robot
73432b788c Merge pull request #1506 from a7i/docs-v0.31
descheduler v0.31: update docs and manifests
2024-09-09 09:02:28 +01:00
Eng Zer Jun
33868c44df chore: replace github.com/ghodss/yaml with sigs.k8s.io/yaml
At the time of making this commit, the package `github.com/ghodss/yaml`
is no longer actively maintained.

`sigs.k8s.io/yaml` is a permanent fork of `ghodss/yaml` and is actively
maintained by Kubernetes SIG.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2024-09-08 23:48:16 +08:00
Amir Alavi
4989cc3b6c descheduler v0.31: update docs and manifests
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-09-05 19:00:41 -04:00
Hao Fan
ab6a3ca2d6 avoid e2e test timeout 2024-09-04 01:26:30 +08:00
Hao Fan
fdd69106a3 modify IMAGE_TAG to fix the version parsing issue 2024-09-04 01:26:20 +08:00
Kubernetes Prow Robot
0f1890e5cd Merge pull request #1480 from ingvagabund/omitempty-for-plugin-args
Plugin args: tag arguments with omitempty to reduce the marshalled json size
2024-09-02 12:00:56 +01:00
Kubernetes Prow Robot
ed6a133449 Merge pull request #1507 from bendikp/make-security-context-conditional
feat(helm): make securityContext conditional in Deployment and CronJob
2024-09-02 11:42:56 +01:00
Bendik Paulsrud
0b505946bf feat(helm): make securityContext conditional in Deployment and CronJob 2024-08-30 08:29:51 +02:00
Kubernetes Prow Robot
dbe4423749 Merge pull request #1504 from a7i/k8s-1.31-e2e
descheduler v0.31: update e2e test versions
2024-08-29 11:42:29 +01:00
Kubernetes Prow Robot
a300009b5d Merge pull request #1505 from a7i/python-eol
chore: upgrade python EOL and action versions
2024-08-29 10:02:29 +01:00
Amir Alavi
9fa48cd97e chore: upgrade python EOL and action versions
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-08-28 23:24:36 -04:00
Amir Alavi
0cf1fc906e descheduler v0.31: update e2e test versions
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-08-28 23:04:59 -04:00
Kubernetes Prow Robot
4e4c5f79fb Merge pull request #1496 from a7i/k8s-1.30
bump k8s.io libs to v0.31.0
2024-08-15 12:47:53 -07:00
Kubernetes Prow Robot
8abb3509f9 Merge pull request #1498 from ingvagabund/readme-drop-v1alpha-descheduling-policy-mention
README: drop v1alpha1 descheduler policy mention
2024-08-15 07:29:18 -07:00
Jan Chaloupka
3eece465fb README: drop v1alpha1 descheduler policy mention 2024-08-15 15:08:11 +02:00
Amir Alavi
33a747096b bump k8s.io libs to v0.31.0
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-08-14 14:51:32 -04:00
Kubernetes Prow Robot
f6fe8fd0bd Merge pull request #1485 from ingvagabund/simplify-TestPodEvictorReset
[unit test]: simplify test pod evictor reset
2024-08-14 02:18:43 -07:00
Jan Chaloupka
29c0a90998 TestPodEvictorReset: replace duplicates strategy with node taints to simplify the testing 2024-08-14 11:00:20 +02:00
Kubernetes Prow Robot
640b675e86 Merge pull request #1484 from ingvagabund/test-descheduling-limits
[unit test]: test descheduling limits
2024-08-14 01:53:04 -07:00
Kubernetes Prow Robot
c0c26e762b Merge pull request #1483 from ingvagabund/dedup-framework-init
tests: de-duplicate framework handle initialization
2024-08-14 01:20:26 -07:00
Jan Chaloupka
91e5e06b5f [unit test]: test descheduling limits 2024-08-14 10:15:58 +02:00
Kubernetes Prow Robot
df7791fafa Merge pull request #1482 from ingvagabund/drop-v1alpha1
Remove descheduler/v1alpha1 type
2024-08-13 19:14:43 -07:00
Jan Chaloupka
cbade38d23 [tests] de-duplicate framework handle initialization 2024-08-12 17:05:30 +02:00
Jan Chaloupka
1e0b1a9840 Remove descheduler/v1alpha1 type 2024-08-09 09:49:59 +02:00
Jan Chaloupka
cb0c1b660d Plugin args: tag arguments with omitempty to reduce the marshalled json size 2024-08-06 15:20:18 +02:00
Kubernetes Prow Robot
daaa3a277e Merge pull request #1479 from sklirg/feat/container-oci-annotation-source
docs: Provide OCI annotation for where to find image sources
2024-08-05 17:48:51 -07:00
Håkon Solbjørg
683cd7f794 docs: Provide OCI annotation for where to find image sources
Add a label for the OCI annotation for image source, as per
https://github.com/opencontainers/image-spec/blob/main/annotations.md
2024-08-05 17:35:13 +02:00
Kubernetes Prow Robot
2189fe4479 Merge pull request #1474 from ingvagabund/e2e-build-and-run-descheduler-image
[TestTooManyRestarts] e2e: build a descheduler image and run the descheduler as a pod
2024-08-04 11:58:38 -07:00
Jan Chaloupka
e4c361d902 e2e: build a descheduler image and run the descheduler as a pod 2024-08-04 12:06:01 +02:00
Kubernetes Prow Robot
de7cec0640 Merge pull request #1476 from shahar-h/go-1.22.5
bump go to 1.22.5
2024-08-03 08:34:37 -07:00
Shahar Harari
48690989da bump go to 1.22.5
Signed-off-by: Shahar Harari <shahar.harari@sap.com>
2024-07-30 08:24:34 +03:00
Kubernetes Prow Robot
601f213b4f Merge pull request #1469 from ingvagabund/e2e-fix-leaderelection
e2e: TestLeaderElection: delete the lease and increase the retry period
2024-07-29 20:01:28 -07:00
Kubernetes Prow Robot
8e70190c8a Merge pull request #1430 from a7i/fix-minor-ver-parse
fix: minor version parsing in version compatibility check
2024-07-29 19:41:17 -07:00
Amir Alavi
a3146a1705 fix: minor version parsing in version compatibility check
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-07-28 11:44:12 -04:00
Victor Gonzalez
55a0812ae6 skip eviction when pod creation time is below minPodAge threshold setting (#1475)
* skip eviction when pod creation time is below minPodAge threshold setting

In the default initialization phase of the descheduler, add a new
constraint to not evict pods whose creation time is below the minPodAge
threshold.

Added value:

- Avoid excessive pod movement when the autoscaler scales up and down.

- Avoid evicting pods while they are warming up.

- Decreases the overall cost of eviction, as no pod will be evicted
  before doing a significant amount of work.

- Guard against scheduling/descheduling loops in situations where
  the descheduler has different node-fit logic from the scheduler,
  like not considering topology spread constraints.

* Use *time.Duration instead of uint for MinPodAge type

* Remove '(in minutes)' from default evictor configuration table

* make fmt

* Add explicit name for Duration field

* Use Duration.String()
2024-07-26 05:59:21 -07:00
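A sketch of the resulting Default Evictor knob, assuming the v1alpha2 `pluginConfig` layout; the duration value is illustrative (`minPodAge` is a `metav1.Duration`, per the README table further below):

```
pluginConfig:
  - name: "DefaultEvictor"
    args:
      minPodAge: "10m"  # pods created less than 10 minutes ago are never evicted
```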
Kubernetes Prow Robot
f3569b5fe2 Merge pull request #1354 from ingvagabund/evictions-in-background
KEP-1397: descheduler integration with evacuation API as an alternative to eviction API
2024-07-24 11:47:35 -07:00
Jan Chaloupka
d2bd573cdb e2e: TestLeaderElection: delete the lease and increase the retry period
In some random cases none of the deschedulers acquires a lease,
making the test fail.
2024-07-22 17:24:56 +02:00
Kubernetes Prow Robot
95ef2bbec3 Merge pull request #1471 from ingvagabund/e2e-duplicates
e2e: TestRemoveDuplicates: limit the tested namespace
2024-07-21 08:55:18 -07:00
Kubernetes Prow Robot
355cff67c1 Merge pull request #1472 from ingvagabund/e2e-run-descheduler-from-top-instead-of-a-plugin
e2e: TestTooManyRestarts: run descheduler as a whole instead of a single plugin
2024-07-20 12:18:55 -07:00
Kubernetes Prow Robot
9220a1c009 Merge pull request #1473 from ingvagabund/bump-golangci-lint
bump(github.com/golangci/golangci-lint)=v1.59.1
2024-07-20 11:46:36 -07:00
Jan Chaloupka
b60a3fcfeb bump(github.com/golangci/golangci-lint)=v1.59.1 2024-07-19 12:54:25 +02:00
Jan Chaloupka
ab467a5dd2 e2e: TestTooManyRestarts: run descheduler as a whole instead of a single plugin 2024-07-19 11:10:19 +02:00
Jan Chaloupka
f66efaf8db e2e: TestRemoveDuplicates: limit the tested namespace
RemoveDuplicates is namespace-scoped. Limiting the namespace avoids
unexpected evictions from other namespaces.
2024-07-19 09:25:20 +02:00
Kubernetes Prow Robot
0c9750cc7f Merge pull request #1468 from adammw/adammw/podlifetime-initcontainerstatus
feat: add init and ephemeral container checks to PodLifeTime
2024-07-17 06:09:07 -07:00
Adam Malcontenti-Wilson
f23967a88e feat: add init and ephemeral container checks to PodLifeTime 2024-07-17 14:36:35 +10:00
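A sketch of the new PodLifeTime arguments, assuming the v1alpha2 `pluginConfig` layout; the lifetime and state values are illustrative, while the two new flags match the README rows added further below:

```
pluginConfig:
  - name: "PodLifeTime"
    args:
      maxPodLifeTimeSeconds: 86400
      states:
        - "CrashLoopBackOff"               # matched via the existing `states` parameter
      includingInitContainers: true        # extend the checks to init container statuses
      includingEphemeralContainers: true   # and to ephemeral (debug) container statuses
```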
Kubernetes Prow Robot
a6e75fe0bd Merge pull request #1467 from a7i/helm-unittest
helm unit tests
2024-07-15 23:39:10 -07:00
Amir Alavi
9b5026314f helm unit tests
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-07-15 14:45:31 -04:00
Kubernetes Prow Robot
c56a408b2c Merge pull request #1443 from a7i/amir/leader-election-namespace-typo
fix: helm leader-election typo to `resourceNamespace`
2024-07-14 09:21:10 -07:00
Amir Alavi
fc1b54318f fix: helm leader-election typo to resourceNamespace
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-07-13 11:46:23 -04:00
Kubernetes Prow Robot
da862a5698 Merge pull request #1444 from ternbusty/feature/namespace-override
feat: Add namespace override settings in Helm Chart
2024-07-11 20:19:22 -07:00
Kubernetes Prow Robot
eb6c325553 Merge pull request #1464 from eminaktas/update-pointer-to-ptr
refactor: replace k8s.io/utils/pointer with k8s.io/utils/ptr
2024-07-11 07:42:58 -07:00
Emin Aktas
f8e128d862 refactor: replace k8s.io/utils/pointer with k8s.io/utils/ptr
Signed-off-by: Emin Aktas <eminaktas34@gmail.com>
2024-07-11 11:36:34 +03:00
Kubernetes Prow Robot
a2a45db6de Merge pull request #1463 from zhifei92/update-readme
Update README about maxNoOfPodsToEvictTotal
2024-07-11 01:31:47 -07:00
zhifei92
d8084e8b39 update README about maxNoOfPodsToEvictTotal 2024-07-11 16:05:36 +08:00
Kubernetes Prow Robot
b614c8bc7c Merge pull request #1458 from ingvagabund/pod-evictor-thread-safe
pod evictor: make it thread safe
2024-07-10 04:48:26 -07:00
Kubernetes Prow Robot
9b41edd382 Merge pull request #1460 from ingvagabund/context
descheduler_test.go: initDescheduler: pass a new ctx with cancel inst…
2024-07-09 11:24:11 -07:00
Jan Chaloupka
bc60a058ef pod evictor: make it thread safe
Currently, all the plugins are run in a sequence,
and no plugin executes evictions in parallel.
Yet, there's no guarantee a future plugin (e.g. a custom one)
will not attempt to evict pods in parallel.
2024-07-09 12:39:08 +02:00
Kubernetes Prow Robot
546a39e88c Merge pull request #1451 from zhifei92/support-total-pods-limit
The descheduler supports limiting the total number of pods evicted per rescheduling cycle
2024-07-09 03:35:17 -07:00
zhifei92
578086ca8e refactor: optimize error descriptions 2024-07-09 17:14:34 +08:00
Jan Chaloupka
ea2eeccff4 descheduler_test.go: initDescheduler: pass a new ctx with cancel instead of returning a cancel 2024-07-09 09:19:59 +02:00
Kubernetes Prow Robot
d0695abea9 Merge pull request #1459 from ingvagabund/descheduler-test-refactoring
descheduler_test.go refactoring
2024-07-08 23:39:16 -07:00
zhifei92
e60f525ec6 feat: support MaxNoOfPodsToEvictTotal 2024-07-09 14:00:27 +08:00
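The new top-level limit sits next to the existing per-node and per-namespace caps; a minimal sketch with illustrative numbers (field names per the README changes further below):

```
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
maxNoOfPodsToEvictPerNode: 50
maxNoOfPodsToEvictPerNamespace: 50
maxNoOfPodsToEvictTotal: 100  # hard cap on evictions per descheduling cycle, summed across all strategies
```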
Jan Chaloupka
3362fec7b0 Define initDescheduler for further use
Can be used by other tests executing individual descheduling cycle
explicitly.
2024-07-08 17:28:51 +02:00
Jan Chaloupka
f240648df2 Set OwnerReferences through GetReplicaSetOwnerRefList 2024-07-08 17:10:07 +02:00
Jan Chaloupka
a818c01832 Use v1alpha2 descheduling policy 2024-07-08 17:04:53 +02:00
Jan Chaloupka
44b59f9b1d initPluginRegistry as a single way to register all plugins in testing 2024-07-08 16:55:27 +02:00
Kubernetes Prow Robot
9d16c28f43 Merge pull request #1456 from ingvagabund/limit-exceeded-to-error
PodEvictor: turn an exceeded limit into an error
2024-07-08 06:25:06 -07:00
Kubernetes Prow Robot
f8afd679ed Merge pull request #1457 from googs1025/refactor_method
fix: add info for error return
2024-07-07 11:10:32 -07:00
googs1025
db0df6c6ca fix: add info for error return 2024-07-07 21:16:52 +08:00
Jan Chaloupka
18d0e4a540 PodEvictor: turn an exceeded limit into an error
When checking whether the node limit has been exceeded, the pod eviction
never fails, so the metric is not reported when a pod fails
to be evicted due to node limit constraints.

The error also allows a plugin to react to other limits getting
exceeded, e.g. the limit on the number of pods evicted per namespace.
2024-07-06 20:14:43 +02:00
Kubernetes Prow Robot
7657345079 Merge pull request #1452 from a7i/defaultevictor-reinit
fix: indexer cache error when default evictor is re-initialized
2024-07-06 04:30:53 -07:00
Kubernetes Prow Robot
d1118354c9 Merge pull request #1455 from a7i/amir/pod-evictor-options
feat: pod evictor options
2024-07-06 03:00:53 -07:00
Amir Alavi
e26f6429a2 feat: pod evictor options 2024-07-05 21:40:55 -04:00
Kubernetes Prow Robot
686417b6de Merge pull request #1454 from ingvagabund/generate-uuid-for-test-pods
test: generate uid when building a pod
2024-07-04 01:19:59 -07:00
Jan Chaloupka
287d1b1573 test: generate uid when building a pod
UID is an integral part of every pod.
Even though we don't test for UID in any of the existing test cases,
future test cases might rely on it.
2024-07-02 14:30:42 +02:00
Amir Alavi
7ab36daaec fix: indexer cache error when default evictor is re-initialized
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-06-28 00:34:55 -04:00
Kubernetes Prow Robot
f2be3fd414 Merge pull request #1436 from fanhaouu/feat-return-node-fit-error
return node fit error in advance
2024-06-27 10:54:41 -07:00
Kubernetes Prow Robot
9eefbf05cb Merge pull request #1450 from googs1025/refactor
refactor: PodMatchNodeSelector method
2024-06-27 10:05:17 -07:00
googs1025
cfa6845a19 refactor: PodMatchNodeSelector method 2024-06-27 22:10:41 +08:00
Hao Fan
8a2b2eb37c return node fit error in advance 2024-06-26 17:18:32 +08:00
Kubernetes Prow Robot
972d28108a Merge pull request #1447 from ingvagabund/refactorings
PodEvictor: refactoring and preparation for eviction requests
2024-06-25 01:08:44 -07:00
Kubernetes Prow Robot
f294d953a3 Merge pull request #1445 from zhifei92/fix-unit-test
Unit-test: add necessary ownerRef to the pod.
2024-06-25 00:15:40 -07:00
Kubernetes Prow Robot
85837b1063 Merge pull request #1446 from googs1025/fix_filter
fix: return the unmatched cases first
2024-06-24 23:53:40 -07:00
Jan Chaloupka
fadef326ff TestPodEvictorReset: check the dry mode evicts duplicated pods 2024-06-23 20:30:36 +02:00
Jan Chaloupka
f5060adcd1 Move fake client from the cachedClient function
Remove the fakeClient from the cachedClient function so a different
fakeClient can be injected for testing purposes.
2024-06-23 19:59:46 +02:00
Jan Chaloupka
75880226c0 Set up the pod evictor only once
Currently, the pod evictor is created during each descheduling cycle
to reset the internal counters and the fake client (in case a dry run is
configured). Instead, create the pod evictor once and reset only what's
needed. That way the pod evictor can later be extended with e.g. a cache
keeping track of eviction requests that are still in progress and
require more than a single descheduling cycle to complete.
2024-06-23 19:24:27 +02:00
Jan Chaloupka
0901cb18bf NewPodEvictor: drop nodes parameter 2024-06-22 15:08:00 +02:00
googs1025
6fdee47cbc fix: return the unmatched cases first, then perform the eviction 2024-06-22 18:16:40 +08:00
zhifei92
ae15fed7e7 fix(unit-test): add necessary ownerRef to the pod. 2024-06-21 19:12:40 +08:00
ternbusty
ebae217631 add namespaceOverride setting to README 2024-06-21 05:43:17 +09:00
ternbusty
d7178984df modify to use descheduler.namespace setting 2024-06-21 05:29:27 +09:00
ternbusty
2253e9816c add namespaceOverride Setting 2024-06-21 05:28:34 +09:00
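A sketch of how the new chart value is consumed at install time; the namespace value is illustrative, the key matches the chart README row added further below:

```
# values.yaml for the descheduler chart
namespaceOverride: "descheduler-system"  # rendered resources land here instead of .Release.Namespace
```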
Kubernetes Prow Robot
cdbd101eae Merge pull request #1438 from googs1025/clean_up_nodelister
chore: cleanup duplicated code
2024-06-20 07:07:16 -07:00
Kubernetes Prow Robot
15551bb834 Merge pull request #1431 from a7i/automated-cherry-pick-of-#1427-upstream-master
Automated cherry pick of #1427: helm: upgrade to v0.30.1
2024-06-20 07:07:09 -07:00
Kubernetes Prow Robot
bdaff92c10 Merge pull request #1441 from googs1025/e2e_test
refactor some methods in e2e test
2024-06-20 05:20:16 -07:00
googs1025
9fea59821f refactor e2e test 2024-06-18 22:48:53 +08:00
Kubernetes Prow Robot
2df11f837a Merge pull request #1439 from googs1025/add_ut
add validation ut
2024-06-17 23:49:47 -07:00
googs1025
0e2478ac41 add validation ut 2024-06-13 11:56:33 +08:00
googs1025
ec33490314 chore: cleanup duplicated code 2024-06-12 08:56:13 +08:00
Kubernetes Prow Robot
1c8ae64726 Merge pull request #1435 from fanhaouu/feat-return-pod-qos
return pod qos in advance
2024-06-10 19:38:05 -07:00
Hao Fan
fd7fcbddfe return pod qos in advance 2024-06-10 00:22:17 +08:00
Kubernetes Prow Robot
69e5c5a1ef Merge pull request #1433 from a7i/CVE-2024-24790
bump go to 1.22.4 for CVE-2024-24790 and CVE-2024-24789
2024-06-09 05:34:07 -07:00
Amir Alavi
8f1102b547 bump go to 1.22.4 for CVE-2024-24790 and CVE-2024-24789 2024-06-09 07:37:01 -04:00
Kubernetes Prow Robot
9a57a37cc0 Merge pull request #1432 from a7i/go-versions
chore: reduce repetition of go versions
2024-06-09 00:44:23 -07:00
Amir Alavi
1dd21ba507 use jq to parse go version per pr feedback
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-06-08 18:25:56 -04:00
Amir Alavi
2ae79bee64 chore: reduce repetition of go versions
When we cut a new release of descheduler, we have to update the Go version in multiple places,
which presents an opportunity to miss updating one.

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-06-08 12:52:34 -04:00
Amir Alavi
32065f0caa helm: upgrade to v0.30.1 2024-06-08 11:19:20 -04:00
Kubernetes Prow Robot
8714397ba6 Merge pull request #1429 from damemi/bump-otel-schema
Bump otel semconv to 1.24
2024-06-06 09:37:29 -07:00
Mike Dame
47cc875fe8 Don't crash if failing to create tracer provider 2024-06-06 15:57:58 +00:00
Mike Dame
d699454d5e Bump otel semconv to 1.24 2024-06-06 13:05:55 +00:00
Jan Chaloupka
a889e57768 Address review comments 2024-06-05 14:18:42 +02:00
Kubernetes Prow Robot
748495a022 Merge pull request #1395 from fanhaouu/fix-check-pod-anti-affinity
fix the issue that the pod anti-affinity filtering rules are not taking effect
2024-06-03 05:04:34 -07:00
Hao Fan
c80556fc91 fix the issue that the pod anti-affinity filtering rules are not taking effect 2024-06-02 18:58:57 +08:00
Kubernetes Prow Robot
17af29afe4 Merge pull request #1416 from googs1025/use_cmd_context
use cmd context instead of using context.Background()
2024-05-28 16:55:20 -07:00
googs1025
8be82b008c use cmd context instead of using context.Background() 2024-05-28 20:50:55 +08:00
Kubernetes Prow Robot
fc01793949 Merge pull request #1413 from duplabe/readme-toc-location-fix
fix TOC location in Readme
2024-05-22 00:21:24 -07:00
balazs.benyo
22dfa5d559 fix TOC location in Readme 2024-05-22 08:21:23 +02:00
Kubernetes Prow Robot
4b8e2076e9 Merge pull request #1412 from duplabe/helm-fix-default-descheduler-policy
fix helm's default deschedulerPolicy
2024-05-21 13:12:11 -07:00
Kubernetes Prow Robot
1c1b1a7207 Merge pull request #1390 from omerap12/helm_chart_allow_falsey_value
Helm chart - allow 'falsey' value in cmdOption
2024-05-21 12:32:53 -07:00
balazs.benyo
2675dbedef fix helm's default deschedulerPolicy 2024-05-21 20:47:59 +02:00
Kubernetes Prow Robot
77a0693e0c Merge pull request #1378 from hanyouqing/youqing/fix-the-replica-type-for-the-helm-chart
Fix the replicas value type for the descheduler helm-chart
2024-05-20 18:18:29 -07:00
Kubernetes Prow Robot
79990946eb Merge pull request #1405 from a7i/kustomize-image-tags
[release 1.30] update kustomize image tags and helm versions
2024-05-20 06:36:42 -07:00
Amir Alavi
4671199be7 upgrade helm chart to v0.30.0 2024-05-20 07:23:30 -04:00
Kubernetes Prow Robot
a82fc7b4e4 Merge pull request #1408 from a7i/amir/bump-go-1.22.3
bump to 1.22.3 to address CVE-2024-24788
2024-05-19 19:56:17 -07:00
Amir Alavi
2ac072e5da bump to 1.22.3 to address CVE-2024-24788
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-05-19 20:27:28 -04:00
Amir Alavi
ee5bc6991d [release 1.30] update kustomize image tags
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-05-18 21:05:44 -04:00
Jan Chaloupka
b1c6e24f93 KEP-1397: descheduler integration with evacuation API as an alternative to eviction API 2024-05-14 13:42:42 +02:00
Omer Aplatony
9f5fc14410 Fixed options without value 2024-05-08 17:42:40 +03:00
Youqing Han
b2bb8272af Convert the replicas value to int for comparison 2024-05-08 11:41:10 +08:00
Omer Aplatony
149a4c11c4 Added spaces 2024-05-03 17:37:32 +03:00
Omer Aplatony
2ce9d46b8c allow 'falsey' value in cmdOption 2024-05-03 14:39:37 +03:00
Youqing Han
667df9b606 Fix the replicas type for the helm-chart 2024-04-18 12:05:14 +08:00
3745 changed files with 297783 additions and 133512 deletions

View File

@@ -20,27 +20,35 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Checkout
-uses: actions/checkout@v3
+uses: actions/checkout@v4
 with:
 fetch-depth: 0
 - name: Set up Helm
-uses: azure/setup-helm@v2.1
+uses: azure/setup-helm@v4.2.0
 with:
-version: v3.9.2
+version: v3.15.1
-- uses: actions/setup-python@v3.1.2
+- uses: actions/setup-python@v5.1.1
 with:
-python-version: 3.7
+python-version: 3.12
-- uses: actions/setup-go@v3
+- uses: actions/setup-go@v5
 with:
-go-version: '1.22.2'
+go-version-file: 'go.mod'
 - name: Set up chart-testing
-uses: helm/chart-testing-action@v2.2.1
+uses: helm/chart-testing-action@v2.6.1
 with:
-version: v3.7.0
+version: v3.11.0
+- name: Install Helm Unit Test Plugin
+run: |
+helm plugin install https://github.com/helm-unittest/helm-unittest
+- name: Run Helm Unit Tests
+run: |
+helm unittest charts/descheduler --strict -d
 - name: Run chart-testing (list-changed)
 id: list-changed

View File

@@ -7,16 +7,16 @@ jobs:
 deploy:
 strategy:
 matrix:
-k8s-version: ["v1.30.0"]
+k8s-version: ["v1.32.0"]
-descheduler-version: ["v0.29.0"]
+descheduler-version: ["v0.32.0"]
-descheduler-api: ["v1alpha1", "v1alpha2"]
+descheduler-api: ["v1alpha2"]
 manifest: ["deployment"]
 runs-on: ubuntu-latest
 steps:
 - name: Checkout Repo
-uses: actions/checkout@v3
+uses: actions/checkout@v4
 - name: Create kind cluster
-uses: helm/kind-action@v1.5.0
+uses: helm/kind-action@v1.12.0
 with:
 node_image: kindest/node:${{ matrix.k8s-version }}
 kubectl_version: ${{ matrix.k8s-version }}

View File

@@ -5,6 +5,9 @@ on:
 branches:
 - release-*
+permissions:
+contents: write # allow actions to update gh-pages branch
 jobs:
 release:
 runs-on: ubuntu-latest
@@ -20,12 +23,12 @@ jobs:
 git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
 - name: Install Helm
-uses: azure/setup-helm@v1
+uses: azure/setup-helm@v4.2.0
 with:
-version: v3.7.0
+version: v3.15.1
 - name: Run chart-releaser
-uses: helm/chart-releaser-action@v1.1.0
+uses: helm/chart-releaser-action@v1.6.0
 env:
 CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
 CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"

View File

@@ -22,7 +22,7 @@ jobs:
 fail-fast: false
 steps:
 - name: Checkout
-uses: actions/checkout@v3
+uses: actions/checkout@v4
 with:
 fetch-depth: 0

View File

@@ -1,5 +1,5 @@
 run:
-timeout: 2m
+timeout: 5m
 linters:
 disable-all: true

View File

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM golang:1.22.2
+FROM golang:1.23.3
 WORKDIR /go/src/sigs.k8s.io/descheduler
 COPY . .
@@ -23,6 +23,8 @@ FROM scratch
 MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
+LABEL org.opencontainers.image.source https://github.com/kubernetes-sigs/descheduler
 USER 1000
 COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler

View File

@@ -26,12 +26,14 @@ ARCHS = amd64 arm arm64
 LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
-GOLANGCI_VERSION := v1.58.1
+GOLANGCI_VERSION := v1.62.2
 HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
-GOFUMPT_VERSION := v0.4.0
+GOFUMPT_VERSION := v0.7.0
 HAS_GOFUMPT := $(shell command -v gofumpt 2> /dev/null)
+GO_VERSION := $(shell (command -v jq > /dev/null && (go mod edit -json | jq -r .Go)) || (sed -En 's/^go (.*)$$/\1/p' go.mod))
 # REGISTRY is the container registry to push
 # into. The default is to push to the staging
 # registry, not production.
@@ -134,7 +136,7 @@ gen:
 ./hack/update-docs.sh
 gen-docker:
-$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:1.22.2 gen
+$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:$(GO_VERSION) gen
 verify-gen:
 ./hack/verify-conversions.sh
@@ -146,7 +148,7 @@ lint:
 ifndef HAS_GOLANGCI
 curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
 endif
-./_output/bin/golangci-lint run
+./_output/bin/golangci-lint run -v
 fmt:
 ifndef HAS_GOFUMPT

View File

@@ -2,7 +2,7 @@
 ![Release Charts](https://github.com/kubernetes-sigs/descheduler/workflows/Release%20Charts/badge.svg)
 <p align="left">
-Click at the [bullet list icon] at the top left corner of the Readme visualization for the github generated table of contents. ↗️
+Click at the [bullet list icon] at the top right corner of the Readme visualization for the github generated table of contents.
 </p>
 <p align="center">
@@ -33,18 +33,15 @@ but relies on the default scheduler for that.
 ## ⚠️ Documentation Versions by Release
 If you are using a published release of Descheduler (such as
-`registry.k8s.io/descheduler/descheduler:v0.26.1`), follow the documentation in
+`registry.k8s.io/descheduler/descheduler:v0.31.0`), follow the documentation in
 that version's release branch, as listed below:
 |Descheduler Version|Docs link|
 |---|---|
+|v0.32.x|[`release-1.32`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.32/README.md)|
+|v0.31.x|[`release-1.31`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.31/README.md)|
 |v0.30.x|[`release-1.30`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.30/README.md)|
 |v0.29.x|[`release-1.29`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.29/README.md)|
-|v0.28.x|[`release-1.28`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.28/README.md)|
-|v0.27.x|[`release-1.27`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.27/README.md)|
-|v0.26.x|[`release-1.26`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.26/README.md)|
-|v0.25.x|[`release-1.25`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.25/README.md)|
-|v0.24.x|[`release-1.24`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.24/README.md)|
 The
 [`master`](https://github.com/kubernetes-sigs/descheduler/blob/master/README.md)
@@ -96,17 +93,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
 Run As A Job
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.26.1' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.32' | kubectl apply -f -
 ```
 Run As A CronJob
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.26.1' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.32' | kubectl apply -f -
 ```
 Run As A Deployment
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.26.1' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.32' | kubectl apply -f -
 ```
 ## User Guide
@@ -115,8 +112,6 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.
 ## Policy, Default Evictor and Strategy plugins
-**⚠️ v1alpha1 configuration is still supported, but deprecated (and soon will be removed). Please consider migrating to v1alpha2 (described bellow). For previous v1alpha1 documentation go to [docs/deprecated/v1alpha1.md](docs/deprecated/v1alpha1.md) ⚠️**
 The Descheduler Policy is configurable and includes default strategy plugins that can be enabled or disabled. It includes a common eviction configuration at the top level, as well as configuration from the Evictor plugin (Default Evictor, if not specified otherwise). Top-level configuration and Evictor plugin configuration are applied to all evictions.
 ### Top Level configuration
@@ -128,22 +123,27 @@ These are top level keys in the Descheduler Policy that you can use to configure
 | `nodeSelector` |`string`| `nil` | limiting the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point |
 | `maxNoOfPodsToEvictPerNode` |`int`| `nil` | maximum number of pods evicted from each node (summed through all strategies) |
 | `maxNoOfPodsToEvictPerNamespace` |`int`| `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
+| `maxNoOfPodsToEvictTotal` |`int`| `nil` | maximum number of pods evicted per rescheduling cycle (summed through all strategies) |
+| `metricsCollector` |`object`| `nil` | configures collection of metrics for actual resource utilization |
+| `metricsCollector.enabled` |`bool`| `false` | enables kubernetes [metrics server](https://kubernetes-sigs.github.io/metrics-server/) collection |
 ### Evictor Plugin configuration (Default Evictor)
 The Default Evictor Plugin is used by default for filtering pods before processing them in an strategy plugin, or for applying a PreEvictionFilter of pods before eviction. You can also create your own Evictor Plugin or use the Default one provided by Descheduler. Other uses for the Evictor plugin can be to sort, filter, validate or group pods by different criteria, and that's why this is handled by a plugin and not configured in the top level config.
 | Name |type| Default Value | Description |
 |------|----|---------------|-------------|
 | `nodeSelector` |`string`| `nil` | limiting the nodes which are processed |
 | `evictLocalStoragePods` |`bool`| `false` | allows eviction of pods with local storage |
 | `evictSystemCriticalPods` |`bool`| `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
 | `ignorePvcPods` |`bool`| `false` | set whether PVC pods should be evicted or ignored |
 | `evictFailedBarePods` |`bool`| `false` | allow eviction of pods without owner references and in failed phase |
 | `labelSelector` |`metav1.LabelSelector`|| (see [label filtering](#label-filtering)) |
 | `priorityThreshold` |`priorityThreshold`|| (see [priority filtering](#priority-filtering)) |
 | `nodeFit` |`bool`|`false`| (see [node fit filtering](#node-fit-filtering)) |
 | `minReplicas` |`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
+| `minPodAge` |`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold |
+| `ignorePodsWithoutPDB` |`bool`|`false`| set whether pods without PodDisruptionBudget should be evicted or ignored |
 ### Example policy
@@ -159,6 +159,9 @@ kind: "DeschedulerPolicy"
 nodeSelector: "node=node1" # you don't need to set this, if not set all will be processed
 maxNoOfPodsToEvictPerNode: 5000 # you don't need to set this, unlimited if not set
 maxNoOfPodsToEvictPerNamespace: 5000 # you don't need to set this, unlimited if not set
+maxNoOfPodsToEvictTotal: 5000 # you don't need to set this, unlimited if not set
+metricsCollector:
+enabled: true # you don't need to set this, metrics are not collected if not set
 profiles:
 - name: ProfileName
 pluginConfig:
@@ -278,11 +281,13 @@ If that parameter is set to `true`, the thresholds are considered as percentage
 `thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
 A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).
-**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
+**NOTE:** By default node resource consumption is determined by the requests and limits of pods, not actual usage.
 This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
 design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
 like `kubectl top`) may differ from the calculated consumption, due to these components reporting
-actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
+actual usage metrics. Metrics-based descheduling can be enabled by setting `metricsUtilization.metricsServer` field.
+In order to have the plugin consume the metrics the metric collector needs to be configured as well.
+See `metricsCollector` field at [Top Level configuration](#top-level-configuration) for available options.
 **Parameters:**
@@ -293,6 +298,9 @@ actual usage metrics. Implementing metrics-based descheduling is currently TODO
 |`targetThresholds`|map(string:int)|
 |`numberOfNodes`|int|
 |`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|
+|`metricsUtilization`|object|
+|`metricsUtilization.metricsServer`|bool|
 **Example:**
@@ -312,6 +320,8 @@ profiles:
 "cpu" : 50
 "memory": 50
 "pods": 50
+metricsUtilization:
+metricsServer: true
 plugins:
 balance:
 enabled:
@@ -503,7 +513,7 @@ key=value matches an excludedTaints entry, the taint will be ignored.
 For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
 excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".
 If a list of includedTaints is provided, a taint will be considered if and only if it matches an included key **or** key=value from the list. Otherwise it will be ignored. Leaving includedTaints unset will include any taint by default.
 **Parameters:**
@@ -671,12 +681,14 @@ Pods in any state (even `Running`) are considered for eviction.
 **Parameters:**
 | Name | Type | Notes |
 |---|---|---|
 | `maxPodLifeTimeSeconds` | int | |
 | `states` | list(string) | Only supported in v0.25+ |
+| `includingInitContainers` | bool | Only supported in v0.31+ |
+| `includingEphemeralContainers` | bool | Only supported in v0.31+ |
 | `namespaces` | (see [namespace filtering](#namespace-filtering)) | |
 | `labelSelector` | (see [label filtering](#label-filtering)) | |
 **Example:**
@@ -858,7 +870,7 @@ does not exist, descheduler won't create it and will throw an error.
 ### Label filtering
-The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#labelselector-v1-meta)
+The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#labelselector-v1-meta)
 to filter pods by their labels:
 * `PodLifeTime`
@@ -997,6 +1009,11 @@ packages that it is compiled with.
 | Descheduler | Supported Kubernetes Version |
 |-------------|------------------------------|
+| v0.32 | v1.32 |
+| v0.31 | v1.31 |
+| v0.30 | v1.30 |
+| v0.29 | v1.29 |
+| v0.28 | v1.28 |
 | v0.27 | v1.27 |
 | v0.26 | v1.26 |
 | v0.25 | v1.25 |

View File

@@ -1,7 +1,7 @@
 apiVersion: v1
 name: descheduler
-version: 0.29.0
+version: 0.32.0
-appVersion: 0.29.0
+appVersion: 0.32.0
 description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
 keywords:
 - kubernetes

View File

@@ -52,6 +52,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `namespaceOverride` | Override the deployment namespace; defaults to .Release.Namespace | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
@@ -63,7 +64,6 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |

View File

@@ -1,7 +1,7 @@
Descheduler installed as a {{ .Values.kind }}.
{{- if eq .Values.kind "Deployment" }}
{{- if eq (.Values.replicas | int) 1 }}
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
{{- end}}
{{- if .Values.leaderElection }}
{{- if .Values.leaderElection }} {{- if .Values.leaderElection }}

View File

@@ -24,6 +24,14 @@ If release name contains chart name it will be used as a full name.
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/*
Expand the namespace of the release.
Allows overriding it for multi-namespace deployments in combined charts.
*/}}
{{- define "descheduler.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
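
For illustration, a minimal values override that exercises this helper (a sketch; the namespace name `descheduler-system` is only an example, not a chart default):

```yaml
# values.yaml (excerpt) -- hypothetical override
# Templates that use `include "descheduler.namespace" .` render this value;
# when it is left empty they fall back to .Release.Namespace.
namespaceOverride: "descheduler-system"
```

The `trunc 63 | trimSuffix "-"` handling mirrors the fullname helper, keeping the result a valid DNS label.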
{{/* {{/*
Create chart name and version as used by the chart label. Create chart name and version as used by the chart label.
*/}} */}}
@@ -87,8 +95,10 @@ Leader Election
{{- if .Values.leaderElection.resourceName }} {{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }} - --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }} {{- end }}
{{- if .Values.leaderElection.resourceNamescape }} {{/* resource namespace value starts with a typo so keeping resourceNamescape for backwards compatibility */}}
- --leader-elect-resource-namespace={{ .Values.leaderElection.resourceNamescape }} {{- $resourceNamespace := default .Values.leaderElection.resourceNamespace .Values.leaderElection.resourceNamescape -}}
{{- if $resourceNamespace -}}
- --leader-elect-resource-namespace={{ $resourceNamespace }}
{{- end -}} {{- end -}}
{{- end }} {{- end }}
{{- end }} {{- end }}
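
A hedged sketch of how the two leader-election values interact (namespace values here are placeholders): because Sprig's `default` returns its last argument unless that argument is empty, the legacy misspelled key still takes effect when set, preserving backwards compatibility.

```yaml
# values.yaml (excerpt) -- a sketch
leaderElection:
  enabled: true
  resourceNamespace: "kube-system"   # preferred spelling; rendered as --leader-elect-resource-namespace=kube-system
  # resourceNamescape: "kube-system" # legacy typo key; if set, it wins for backwards compatibility
```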

View File

@@ -12,5 +12,5 @@ roleRef:
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: {{ template "descheduler.serviceAccountName" . }} name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }} namespace: {{ include "descheduler.namespace" . }}
{{- end -}} {{- end -}}

View File

@@ -3,7 +3,7 @@ apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata: metadata:
name: {{ template "descheduler.fullname" . }} name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }} namespace: {{ include "descheduler.namespace" . }}
labels: labels:
{{- include "descheduler.labels" . | nindent 4 }} {{- include "descheduler.labels" . | nindent 4 }}
data: data:

View File

@@ -3,7 +3,7 @@ apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob kind: CronJob
metadata: metadata:
name: {{ template "descheduler.fullname" . }} name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }} namespace: {{ include "descheduler.namespace" . }}
labels: labels:
{{- include "descheduler.labels" . | nindent 4 }} {{- include "descheduler.labels" . | nindent 4 }}
spec: spec:
@@ -15,10 +15,10 @@ spec:
{{- if .Values.startingDeadlineSeconds }} {{- if .Values.startingDeadlineSeconds }}
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }} startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
{{- end }} {{- end }}
{{- if .Values.successfulJobsHistoryLimit }} {{- if ne .Values.successfulJobsHistoryLimit nil }}
successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }} successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
{{- end }} {{- end }}
{{- if .Values.failedJobsHistoryLimit }} {{- if ne .Values.failedJobsHistoryLimit nil }}
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }} failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
{{- end }} {{- end }}
{{- if .Values.timeZone }} {{- if .Values.timeZone }}
@@ -81,14 +81,22 @@ spec:
args: args:
- --policy-config-file=/policy-dir/policy.yaml - --policy-config-file=/policy-dir/policy.yaml
{{- range $key, $value := .Values.cmdOptions }} {{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }} {{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
{{- end }}
{{- end }} {{- end }}
livenessProbe: livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }} {{- toYaml .Values.livenessProbe | nindent 16 }}
ports:
{{- toYaml .Values.ports | nindent 16 }}
resources: resources:
{{- toYaml .Values.resources | nindent 16 }} {{- toYaml .Values.resources | nindent 16 }}
{{- if .Values.securityContext }}
securityContext: securityContext:
{{- toYaml .Values.securityContext | nindent 16 }} {{- toYaml .Values.securityContext | nindent 16 }}
{{- end }}
volumeMounts: volumeMounts:
- mountPath: /policy-dir - mountPath: /policy-dir
name: policy-volume name: policy-volume
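
To make the two CronJob template changes above concrete, here is a hedged values excerpt (the `v` and `dry-run` keys are only illustrative cmdOptions) showing what now renders:

```yaml
# values.yaml (excerpt) -- a sketch of the new rendering behavior
successfulJobsHistoryLimit: 0   # now rendered, since the template checks "ne ... nil" instead of truthiness
failedJobsHistoryLimit: 0       # likewise no longer dropped when set to 0
cmdOptions:
  v: 3                          # rendered as: --v=3
  dry-run: null                 # nil value renders a bare flag: --dry-run
```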

View File

@@ -3,11 +3,11 @@ apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: {{ template "descheduler.fullname" . }} name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }} namespace: {{ include "descheduler.namespace" . }}
labels: labels:
{{- include "descheduler.labels" . | nindent 4 }} {{- include "descheduler.labels" . | nindent 4 }}
spec: spec:
{{- if gt .Values.replicas 1.0}} {{- if gt (.Values.replicas | int) 1 }}
{{- if not .Values.leaderElection.enabled }} {{- if not .Values.leaderElection.enabled }}
{{- fail "You must set leaderElection to use more than 1 replica"}} {{- fail "You must set leaderElection to use more than 1 replica"}}
{{- end}} {{- end}}
@@ -53,18 +53,23 @@ spec:
- --policy-config-file=/policy-dir/policy.yaml - --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }} - --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }} {{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }} {{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
{{- end }}
{{- end }} {{- end }}
{{- include "descheduler.leaderElection" . | nindent 12 }} {{- include "descheduler.leaderElection" . | nindent 12 }}
ports: ports:
- containerPort: 10258 {{- toYaml .Values.ports | nindent 12 }}
protocol: TCP
livenessProbe: livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }} {{- toYaml .Values.livenessProbe | nindent 12 }}
resources: resources:
{{- toYaml .Values.resources | nindent 12 }} {{- toYaml .Values.resources | nindent 12 }}
{{- if .Values.securityContext }}
securityContext: securityContext:
{{- toYaml .Values.securityContext | nindent 12 }} {{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
volumeMounts: volumeMounts:
- mountPath: /policy-dir - mountPath: /policy-dir
name: policy-volume name: policy-volume
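
Since the Deployment template now coerces `replicas` to an int and fails the render when more than one replica is requested without leader election, a working HA override would look roughly like this (a sketch, not a chart default):

```yaml
# values.yaml (excerpt) -- a sketch; rendering fails without leaderElection.enabled=true
kind: Deployment
replicas: 2
leaderElection:
  enabled: true
```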

View File

@@ -6,7 +6,7 @@ metadata:
labels: labels:
{{- include "descheduler.labels" . | nindent 4 }} {{- include "descheduler.labels" . | nindent 4 }}
name: {{ template "descheduler.fullname" . }} name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }} namespace: {{ include "descheduler.namespace" . }}
spec: spec:
clusterIP: None clusterIP: None
{{- if .Values.service.ipFamilyPolicy }} {{- if .Values.service.ipFamilyPolicy }}

View File

@@ -3,7 +3,7 @@ apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: {{ template "descheduler.serviceAccountName" . }} name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }} namespace: {{ include "descheduler.namespace" . }}
labels: labels:
{{- include "descheduler.labels" . | nindent 4 }} {{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }} {{- if .Values.serviceAccount.annotations }}

View File

@@ -14,7 +14,7 @@ spec:
jobLabel: jobLabel jobLabel: jobLabel
namespaceSelector: namespaceSelector:
matchNames: matchNames:
- {{ .Release.Namespace }} - {{ include "descheduler.namespace" . }}
selector: selector:
matchLabels: matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }} {{- include "descheduler.selectorLabels" . | nindent 6 }}

1
charts/descheduler/tests/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
__snapshot__

View File

@@ -0,0 +1,17 @@
suite: Test Descheduler CronJob
templates:
- "*.yaml"
release:
name: descheduler
set:
kind: CronJob
tests:
- it: creates CronJob when kind is set
template: templates/cronjob.yaml
asserts:
- isKind:
of: CronJob
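
An additional test in the same style could pin down the history-limit fix above (a sketch, assuming the suite's helm-unittest conventions and its `equal` assertion; this is not part of the chart's test suite):

```yaml
  - it: renders history limits even when they are zero
    template: templates/cronjob.yaml
    set:
      successfulJobsHistoryLimit: 0
      failedJobsHistoryLimit: 0
    asserts:
      - equal:
          path: spec.successfulJobsHistoryLimit
          value: 0
      - equal:
          path: spec.failedJobsHistoryLimit
          value: 0
```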

View File

@@ -0,0 +1,49 @@
suite: Test Descheduler Deployment
templates:
- "*.yaml"
release:
name: descheduler
set:
kind: Deployment
tests:
- it: creates Deployment when kind is set
template: templates/deployment.yaml
asserts:
- isKind:
of: Deployment
- it: enables leader-election
set:
leaderElection:
enabled: true
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect=true
- it: support leader-election resourceNamespace
set:
leaderElection:
enabled: true
resourceNamespace: test
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=test
- it: support legacy leader-election resourceNamescape
set:
leaderElection:
enabled: true
resourceNamescape: typo
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=typo
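
A further sketch in the same style (assuming the default `ports` value shown in the values.yaml changes below and the `contains` assertion already used here) that would cover the new values-driven ports:

```yaml
  - it: renders the default metrics port from values
    template: templates/deployment.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].ports
          content:
            containerPort: 10258
            protocol: TCP
```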

View File

@@ -18,9 +18,13 @@ resources:
requests: requests:
cpu: 500m cpu: 500m
memory: 256Mi memory: 256Mi
# limits: limits:
# cpu: 100m cpu: 500m
# memory: 128Mi memory: 256Mi
ports:
- containerPort: 10258
protocol: TCP
securityContext: securityContext:
allowPrivilegeEscalation: false allowPrivilegeEscalation: false
@@ -39,6 +43,9 @@ podSecurityContext: {}
nameOverride: "" nameOverride: ""
fullnameOverride: "" fullnameOverride: ""
# -- Override the deployment namespace; defaults to .Release.Namespace
namespaceOverride: ""
# labels that'll be applied to all resources # labels that'll be applied to all resources
commonLabels: {} commonLabels: {}
@@ -70,7 +77,7 @@ leaderElection: {}
# retryPeriod: 2s # retryPeriod: 2s
# resourceLock: "leases" # resourceLock: "leases"
# resourceName: "descheduler" # resourceName: "descheduler"
# resourceNamescape: "kube-system" # resourceNamespace: "kube-system"
command: command:
- "/bin/descheduler" - "/bin/descheduler"
@@ -111,14 +118,13 @@ deschedulerPolicy:
args: args:
podRestartThreshold: 100 podRestartThreshold: 100
includingInitContainers: true includingInitContainers: true
- name: RemovePodsViolatingNodeTaints - name: RemovePodsViolatingNodeAffinity
args: args:
nodeAffinityType: nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution - requiredDuringSchedulingIgnoredDuringExecution
- name: RemovePodsViolatingNodeTaints
- name: RemovePodsViolatingInterPodAntiAffinity - name: RemovePodsViolatingInterPodAntiAffinity
- name: RemovePodsViolatingTopologySpreadConstraint - name: RemovePodsViolatingTopologySpreadConstraint
args:
includeSoftConstraints: false
- name: LowNodeUtilization - name: LowNodeUtilization
args: args:
thresholds: thresholds:
@@ -133,7 +139,6 @@ deschedulerPolicy:
balance: balance:
enabled: enabled:
- RemoveDuplicates - RemoveDuplicates
- RemovePodsViolatingNodeAffinity
- RemovePodsViolatingTopologySpreadConstraint - RemovePodsViolatingTopologySpreadConstraint
- LowNodeUtilization - LowNodeUtilization
deschedule: deschedule:
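
With the container port now driven by `.Values.ports`, overriding it is a values-only change. A hedged example (10259 is an arbitrary port; `secure-port` is the flag the CLI docs below mention for changing the serving port):

```yaml
# values.yaml (excerpt) -- a sketch
ports:
  - containerPort: 10259
    protocol: TCP
cmdOptions:
  secure-port: 10259   # keep the serving port in sync with the container port
```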

View File

@@ -18,17 +18,28 @@ limitations under the License.
package options package options
import ( import (
"strings"
"time" "time"
"github.com/spf13/pflag" "github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiserver "k8s.io/apiserver/pkg/server"
apiserveroptions "k8s.io/apiserver/pkg/server/options" apiserveroptions "k8s.io/apiserver/pkg/server/options"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
cliflag "k8s.io/component-base/cli/flag"
componentbaseconfig "k8s.io/component-base/config" componentbaseconfig "k8s.io/component-base/config"
componentbaseoptions "k8s.io/component-base/config/options" componentbaseoptions "k8s.io/component-base/config/options"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig" "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1" "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme" deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
"sigs.k8s.io/descheduler/pkg/features"
"sigs.k8s.io/descheduler/pkg/tracing" "sigs.k8s.io/descheduler/pkg/tracing"
) )
@@ -40,11 +51,17 @@ const (
type DeschedulerServer struct { type DeschedulerServer struct {
componentconfig.DeschedulerConfiguration componentconfig.DeschedulerConfiguration
Client clientset.Interface Client clientset.Interface
EventClient clientset.Interface EventClient clientset.Interface
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback MetricsClient metricsclient.Interface
DisableMetrics bool SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
EnableHTTP2 bool SecureServingInfo *apiserver.SecureServingInfo
DisableMetrics bool
EnableHTTP2 bool
// FeatureGates enabled by the user
FeatureGates map[string]bool
// DefaultFeatureGates for internal accessing so unit tests can enable/disable specific features
DefaultFeatureGates featuregate.FeatureGate
} }
// NewDeschedulerServer creates a new DeschedulerServer with default parameters // NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -102,8 +119,31 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces") fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error") fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check") fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
fs.Var(cliflag.NewMapStringBool(&rs.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
"Options are:\n"+strings.Join(features.DefaultMutableFeatureGate.KnownFeatures(), "\n"))
componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs) componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
rs.SecureServing.AddFlags(fs) rs.SecureServing.AddFlags(fs)
} }
func (rs *DeschedulerServer) Apply() error {
err := features.DefaultMutableFeatureGate.SetFromMap(rs.FeatureGates)
if err != nil {
return err
}
rs.DefaultFeatureGates = features.DefaultMutableFeatureGate
// loopbackClientConfig is a config for a privileged loopback connection
var loopbackClientConfig *restclient.Config
var secureServing *apiserver.SecureServingInfo
if err := rs.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return err
}
secureServing.DisableHTTP2 = !rs.EnableHTTP2
rs.SecureServingInfo = secureServing
return nil
}

View File

@@ -23,19 +23,16 @@ import (
"os/signal" "os/signal"
"syscall" "syscall"
"k8s.io/apiserver/pkg/server/healthz" "github.com/spf13/cobra"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler" "sigs.k8s.io/descheduler/pkg/descheduler"
"sigs.k8s.io/descheduler/pkg/tracing" "sigs.k8s.io/descheduler/pkg/tracing"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
apiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/mux" "k8s.io/apiserver/pkg/server/mux"
restclient "k8s.io/client-go/rest"
"k8s.io/component-base/featuregate" "k8s.io/component-base/featuregate"
"k8s.io/component-base/logs" "k8s.io/component-base/logs"
logsapi "k8s.io/component-base/logs/api/v1" logsapi "k8s.io/component-base/logs/api/v1"
@@ -67,40 +64,16 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
return nil return nil
}, },
RunE: func(cmd *cobra.Command, args []string) error { RunE: func(cmd *cobra.Command, args []string) error {
// loopbackClientConfig is a config for a privileged loopback connection if err = s.Apply(); err != nil {
var loopbackClientConfig *restclient.Config klog.ErrorS(err, "failed to apply")
var secureServing *apiserver.SecureServingInfo
if err := s.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return err return err
} }
secureServing.DisableHTTP2 = !s.EnableHTTP2 if err = Run(cmd.Context(), s); err != nil {
klog.ErrorS(err, "failed to run descheduler server")
ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
if !s.DisableMetrics {
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
}
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
stoppedCh, _, err := secureServing.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return err return err
} }
if err = Run(ctx, s); err != nil {
klog.ErrorS(err, "descheduler server")
return err
}
done()
// wait for metrics server to close
<-stoppedCh
return nil return nil
}, },
} }
@@ -114,14 +87,39 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
return cmd return cmd
} }
func Run(ctx context.Context, rs *options.DeschedulerServer) error { func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
err := tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError) ctx, done := signal.NotifyContext(rootCtx, syscall.SIGINT, syscall.SIGTERM)
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
if !rs.DisableMetrics {
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
}
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
stoppedCh, _, err := rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil { if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return err return err
} }
err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
if err != nil {
klog.ErrorS(err, "failed to create tracer provider")
}
defer tracing.Shutdown(ctx) defer tracing.Shutdown(ctx)
// increase the fake watch channel so the dry-run mode can be run // increase the fake watch channel so the dry-run mode can be run
// over a cluster with thousands of pods // over a cluster with thousands of pods
watch.DefaultChanSize = 100000 watch.DefaultChanSize = 100000
return descheduler.Run(ctx, rs) err = descheduler.Run(ctx, rs)
if err != nil {
return err
}
done()
// wait for metrics server to close
<-stoppedCh
return nil
} }

View File

@@ -19,16 +19,21 @@ descheduler [flags]
--client-connection-kubeconfig string File path to kube configuration for interacting with kubernetes apiserver. --client-connection-kubeconfig string File path to kube configuration for interacting with kubernetes apiserver.
--client-connection-qps float32 QPS to use for interacting with kubernetes apiserver. --client-connection-qps float32 QPS to use for interacting with kubernetes apiserver.
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified. --descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--disable-http2-serving If true, HTTP2 serving will be disabled [default=false]
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags. --disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
--dry-run Execute descheduler in dry run mode. --dry-run Execute descheduler in dry run mode.
--enable-http2 If http/2 should be enabled for the metrics and health check --enable-http2 If http/2 should be enabled for the metrics and health check
--feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
EvictionsInBackground=true|false (ALPHA - default=false)
-h, --help help for descheduler -h, --help help for descheduler
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default. --http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead. --kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability. --leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 2m17s) --leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 2m17s)
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. (default 1m47s) --leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. (default 1m47s)
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases") --leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases'. (default "leases")
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler") --leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system") --leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s) --leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
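
When deploying via the Helm chart, the new `--feature-gates` flag can be passed through `cmdOptions` like any other option. A hedged example enabling the alpha gate listed above:

```yaml
# Helm values excerpt (a sketch)
cmdOptions:
  feature-gates: "EvictionsInBackground=true"   # rendered as --feature-gates=EvictionsInBackground=true
```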

View File

@@ -3,7 +3,7 @@
## Required Tools ## Required Tools
- [Git](https://git-scm.com/downloads) - [Git](https://git-scm.com/downloads)
- [Go 1.23+](https://golang.org/dl/)
- [Docker](https://docs.docker.com/install/) - [Docker](https://docs.docker.com/install/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl) - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
- [kind v0.10.0+](https://kind.sigs.k8s.io/) - [kind v0.10.0+](https://kind.sigs.k8s.io/)

View File

@@ -1,786 +0,0 @@
[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
![Release Charts](https://github.com/kubernetes-sigs/descheduler/workflows/Release%20Charts/badge.svg)
<p align="center">
<img src="assets/logo/descheduler-stacked-color.png" width="40%" align="center" alt="descheduler">
</p>
# Descheduler for Kubernetes
Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
pod can or cannot be scheduled, are guided by its configurable policy, which comprises a set of
rules called predicates and priorities. The scheduler's decisions are influenced by its view of
the Kubernetes cluster at the point in time when a new pod appears for scheduling.
As Kubernetes clusters are very dynamic and their state changes over time, there may be a desire
to move already running pods to other nodes for various reasons:
* Some nodes are under- or over-utilized.
* The original scheduling decision no longer holds true, as taints or labels are added to
or removed from nodes and pod/node affinity requirements are no longer satisfied.
* Some nodes failed and their pods moved to other nodes.
* New nodes are added to clusters.
Consequently, there might be several pods scheduled on less desirable nodes in a cluster.
Descheduler, based on its policy, finds pods that can be moved and evicts them. Please
note that, in the current implementation, descheduler does not schedule replacement of evicted pods
but relies on the default scheduler for that.
Table of Contents
=================
<!-- toc -->
- [Quick Start](#quick-start)
- [Run As A Job](#run-as-a-job)
- [Run As A CronJob](#run-as-a-cronjob)
- [Run As A Deployment](#run-as-a-deployment)
- [Install Using Helm](#install-using-helm)
- [Install Using Kustomize](#install-using-kustomize)
- [User Guide](#user-guide)
- [Policy and Strategies](#policy-and-strategies)
- [RemoveDuplicates](#removeduplicates)
- [LowNodeUtilization](#lownodeutilization)
- [HighNodeUtilization](#highnodeutilization)
- [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
- [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
- [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
- [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
- [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
- [PodLifeTime](#podlifetime)
- [RemoveFailedPods](#removefailedpods)
- [Filter Pods](#filter-pods)
- [Namespace filtering](#namespace-filtering)
- [Priority filtering](#priority-filtering)
- [Label filtering](#label-filtering)
- [Node Fit filtering](#node-fit-filtering)
- [Pod Evictions](#pod-evictions)
- [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
- [High Availability](#high-availability)
- [Configure HA Mode](#configure-ha-mode)
- [Metrics](#metrics)
- [Compatibility Matrix](#compatibility-matrix)
- [Getting Involved and Contributing](#getting-involved-and-contributing)
- [Communicating With Contributors](#communicating-with-contributors)
- [Roadmap](#roadmap)
- [Code of conduct](#code-of-conduct)
<!-- /toc -->
## Quick Start
The descheduler can be run as a `Job`, `CronJob`, or `Deployment` inside of a k8s cluster. This has the
advantage that it can be run repeatedly without user intervention.
The descheduler pod is run as a critical pod in the `kube-system` namespace to avoid
being evicted by itself or by the kubelet.
### Run As A Job
```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/job/job.yaml
```
### Run As A CronJob
```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/cronjob/cronjob.yaml
```
### Run As A Deployment
```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/deployment/deployment.yaml
```
### Install Using Helm
Starting with release v0.18.0 there is an official helm chart that can be used to install the
descheduler. See the [helm chart README](https://github.com/kubernetes-sigs/descheduler/blob/master/charts/descheduler/README.md) for detailed instructions.
The descheduler helm chart is also listed on the [artifact hub](https://artifacthub.io/packages/helm/descheduler/descheduler).
### Install Using Kustomize
You can use kustomize to install descheduler.
See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/cmd/build/) for detailed instructions.
Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.30.0' | kubectl apply -f -
```
Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.30.0' | kubectl apply -f -
```
Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.30.0' | kubectl apply -f -
```
## User Guide
See the [user guide](docs/user-guide.md) in the `/docs` directory.
## Policy and Strategies
Descheduler's policy is configurable and includes strategies that can be enabled or disabled. By default, all strategies are enabled.
The policy includes a common configuration that applies to all the strategies:
| Name | Default Value | Description |
|------|---------------|-------------|
| `nodeSelector` | `nil` | limiting the nodes which are processed |
| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
| `evictDaemonSetPods` | `false` | allows eviction of pods associated to DaemonSet resources |
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
| `maxNoOfPodsToEvictPerNamespace` | `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
| `evictFailedBarePods` | `false` | allow eviction of pods without owner references and in failed phase |
As part of the policy, the parameters associated with each strategy can be configured.
See each strategy for details on available parameters.
**Policy:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictFailedBarePods: false
evictLocalStoragePods: true
evictDaemonSetPods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false
strategies:
...
```
The following diagram provides a visualization of most of the strategies to help
categorize how strategies fit together.
![Strategies diagram](strategies_diagram.png)
### RemoveDuplicates
This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
ReplicationController (RC), StatefulSet, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This situation can arise
if some nodes went down for whatever reason and their pods were moved to other nodes, leading to
more than one pod associated with, for example, the same RS or RC running on the same node. Once the failed nodes
are ready again, this strategy can be enabled to evict those duplicate pods.
It provides one optional parameter, `excludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction. Note that
pods created by Deployments are considered for eviction by this strategy. The `excludeOwnerKinds` parameter
should include `ReplicaSet` to have pods created by Deployments excluded.
**Parameters:**
|Name|Type|
|---|---|
|`excludeOwnerKinds`|list(string)|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveDuplicates":
enabled: true
params:
removeDuplicates:
excludeOwnerKinds:
- "ReplicaSet"
```
### LowNodeUtilization
This strategy finds nodes that are under utilized and evicts pods, if possible, from other nodes
in the hope that recreation of evicted pods will be scheduled on these underutilized nodes. The
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage (the percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node).
If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods' resource requests are considered when computing node resource utilization.
There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, number of pods, or extended resources),
the node is considered over utilized. Any node between the thresholds, `thresholds` and `targetThresholds` is
considered appropriately utilized and is not considered for eviction. The threshold, `targetThresholds`,
can be configured for cpu, memory, and number of pods too in terms of percentage.
These thresholds, `thresholds` and `targetThresholds`, can be tuned as per your cluster requirements. Note that this
strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
(those with usage below `thresholds`); it will abort if the number of `underutilized nodes` or `overutilized nodes` is zero.
Additionally, the strategy accepts a `useDeviationThresholds` parameter.
If that parameter is set to `true`, the thresholds are considered as percentage deviations from mean resource usage.
`thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
**Parameters:**
|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`targetThresholds`|map(string:int)|
|`numberOfNodes`|int|
|`useDeviationThresholds`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|`Namespaces`|(see [namespace filtering](#namespace-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
targetThresholds:
"cpu" : 50
"memory": 50
"pods": 50
```
Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`.
If any of these resource types is not specified, all its thresholds default to 100% to avoid nodes going from underutilized to overutilized.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional,
and will not be used to compute a node's usage unless they are specified explicitly in `thresholds` and `targetThresholds`.
* `thresholds` or `targetThresholds` can not be nil and they must configure exactly the same types of resources.
* The valid range of the resource's percentage value is \[0, 100\]
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of underutilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
underutilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
### HighNodeUtilization
This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
trigger down scaling of under utilized nodes.
This strategy **must** be used with the scheduler scoring strategy `MostAllocated`. The parameters of this strategy are
configured under `nodeResourceUtilizationThresholds`.
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node.
If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods' resource requests are considered when computing node resource utilization.
Any node above `thresholds` is considered appropriately utilized and is not considered for eviction.
The `thresholds` param could be tuned as per your cluster requirements. Note that this
strategy evicts pods from `underutilized nodes` (those with usage below `thresholds`)
so that they can be recreated in appropriately utilized nodes.
The strategy will abort if the number of `underutilized nodes` or `appropriately utilized nodes` is zero.
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
**Parameters:**
|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`numberOfNodes`|int|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|`Namespaces`|(see [namespace filtering](#namespace-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
```
Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`. If any of these resource types is not specified, all its thresholds default to 100%.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional, and will not be used to compute a node's usage unless they are specified explicitly in `thresholds`.
* `thresholds` can not be nil.
* The valid range of the resource's percentage value is \[0, 100\]
There is another parameter associated with the `HighNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of underutilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
underutilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
### RemovePodsViolatingInterPodAntiAffinity
This strategy makes sure that pods violating inter-pod anti-affinity are removed from nodes. For example,
if there is podA on a node, and podB and podC (running on the same node) have anti-affinity rules which prohibit
them from running on the same node, then podA will be evicted from the node so that podB and podC can run. This
issue can happen when the anti-affinity rules for podB and podC are created while they are already running on
the node.
**Parameters:**
|Name|Type|
|---|---|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
```
### RemovePodsViolatingNodeAffinity
This strategy makes sure all pods violating
[node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)
are eventually removed from nodes. Node affinity rules allow a pod to specify the
`requiredDuringSchedulingIgnoredDuringExecution` type, which tells the scheduler
to respect node affinity when scheduling the pod but allows the kubelet to ignore it
in case the node changes over time and no longer satisfies the affinity.
When enabled, the strategy serves as a temporary implementation
of `requiredDuringSchedulingRequiredDuringExecution` and evicts pods from nodes
that no longer satisfy node affinity.
For example, suppose podA is scheduled on nodeA, which satisfies the node
affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time
of scheduling. Over time, nodeA stops satisfying the rule. When the strategy gets
executed and there is another node available that satisfies the node affinity rule,
podA gets evicted from nodeA.
**Parameters:**
|Name|Type|
|---|---|
|`nodeAffinityType`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingNodeAffinity":
enabled: true
params:
nodeAffinityType:
- "requiredDuringSchedulingIgnoredDuringExecution"
```
### RemovePodsViolatingNodeTaints
This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example, suppose a
pod "podA" with a toleration for the taint ``key=value:NoSchedule`` is scheduled and running on the tainted
node. If the node's taint is subsequently updated or removed, the pod's tolerations no longer match the taint
and the pod will be evicted.
Node taints can be excluded from consideration by specifying a list of excludedTaints. If a node taint key **or**
key=value matches an excludedTaints entry, the taint will be ignored.
For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".
**Parameters:**
|Name|Type|
|---|---|
|`excludedTaints`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
````yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingNodeTaints":
enabled: true
params:
excludedTaints:
- dedicated=special-user # exclude taints with key "dedicated" and value "special-user"
- reserved # exclude all taints with key "reserved"
````
### RemovePodsViolatingTopologySpreadConstraint
This strategy makes sure that pods violating [topology spread constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.
By default, this strategy only deals with hard constraints, setting parameter `includeSoftConstraints` to `true` will
include soft constraints.
Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.
**Parameters:**
|Name|Type|
|---|---|
|`includeSoftConstraints`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: false
```
### RemovePodsHavingTooManyRestarts
This strategy makes sure that pods having too many restarts are removed from nodes. For example, a pod with an EBS/PD volume that
can't get the volume/disk attached to the instance should be re-scheduled to another node. Its parameters
include `podRestartThreshold`, which is the number of restarts (summed over all eligible containers) at which a pod
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
into that calculation.
**Parameters:**
|Name|Type|
|---|---|
|`podRestartThreshold`|int|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsHavingTooManyRestarts":
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
```
### PodLifeTime
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
You can also specify the `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`
If a value for `states` or `podStatusPhases` is not specified,
Pods in any state (even `Running`) are considered for eviction.
**Parameters:**
|Name|Type|Notes|
|---|---|---|
|`maxPodLifeTimeSeconds`|int||
|`podStatusPhases`|list(string)|Deprecated in v0.25+ Use `states` instead|
|`states`|list(string)|Only supported in v0.25+|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))||
|`namespaces`|(see [namespace filtering](#namespace-filtering))||
|`labelSelector`|(see [label filtering](#label-filtering))||
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
states:
- "Pending"
- "PodInitializing"
```
### RemoveFailedPods
This strategy evicts pods that are in the Failed status phase.
You can provide an optional parameter to filter by failed `reasons`.
`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
You can specify an optional parameter `minPodLifetimeSeconds` to evict pods that are older than specified seconds.
Lastly, you can specify the optional parameter `excludeOwnerKinds` and if a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
**Parameters:**
|Name|Type|
|---|---|
|`minPodLifetimeSeconds`|uint|
|`excludeOwnerKinds`|list(string)|
|`reasons`|list(string)|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "NodeAffinity"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600
```
## Filter Pods
### Namespace filtering
The following strategies accept a `namespaces` parameter which allows specifying a list of namespaces to include or, respectively, exclude:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemoveDuplicates`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
* `LowNodeUtilization` and `HighNodeUtilization` (Only filtered right before eviction)
For example:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
namespaces:
include:
- "namespace1"
- "namespace2"
```
In this example, `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
The same holds for the `exclude` field:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
namespaces:
exclude:
- "namespace1"
- "namespace2"
```
The strategy gets executed over all namespaces except `namespace1` and `namespace2`.
It is not allowed to combine the `include` and `exclude` fields.
### Priority filtering
All strategies are able to configure a priority threshold; only pods under the threshold can be evicted. You can
specify this threshold by setting the `thresholdPriorityClassName` (setting the threshold to the value of the given
priority class) or `thresholdPriority` (directly setting the threshold) parameters. By default, this threshold
is set to the value of the `system-cluster-critical` priority class.
Note: Setting `evictSystemCriticalPods` to true disables priority filtering entirely.
E.g.
Setting `thresholdPriority`
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
thresholdPriority: 10000
```
Setting `thresholdPriorityClassName`
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
thresholdPriorityClassName: "priorityclass1"
```
Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`. Also, if the given priority class
does not exist, the descheduler won't create it and will throw an error instead.
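If you do want system-critical pods to be eligible for eviction, the note above mentions that the top-level `evictSystemCriticalPods` setting bypasses priority filtering. A minimal sketch of what that might look like, assuming the same v1alpha1 policy layout used in the other examples in this document:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
# Disables priority filtering entirely; system-critical pods become evictable. Use with care.
evictSystemCriticalPods: true
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
```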
### Label filtering
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#labelselector-v1-meta)
to filter pods by their labels:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
This allows strategies to run only against the pods the descheduler is interested in.
For example:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
labelSelector:
matchLabels:
component: redis
matchExpressions:
- {key: tier, operator: In, values: [cache]}
- {key: environment, operator: NotIn, values: [dev]}
```
### Node Fit filtering
The following strategies accept a `nodeFit` boolean parameter which can optimize descheduling:
* `RemoveDuplicates`
* `LowNodeUtilization`
* `HighNodeUtilization`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemovePodsHavingTooManyRestarts`
* `RemoveFailedPods`
If set to `true`, the descheduler will consider whether the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently, the following criteria are considered when `nodeFit` is set to `true`:
- A `nodeSelector` on the pod
- Any `tolerations` on the pod and any `taints` on the other nodes
- `nodeAffinity` on the pod
- Resource `requests` made by the pod and the resources available on other nodes
- Whether any of the other nodes are marked as `unschedulable`
E.g.
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeFit: true
nodeResourceUtilizationThresholds:
thresholds:
"cpu": 20
"memory": 20
"pods": 20
targetThresholds:
"cpu": 50
"memory": 50
"pods": 50
```
Note that node fit filtering references the current pod spec, and not that of its owner.
Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
This is expected behavior as the descheduler is a "best-effort" mechanism.
Using Deployments instead of ReplicationControllers provides an automated rollout of pod spec changes, therefore ensuring that the descheduler has an up-to-date view of the cluster state.
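To illustrate how the filters above compose, here is a sketch of a policy that combines namespace, label, priority, and node fit filtering for a single strategy. It assumes the same v1alpha1 policy API used throughout the examples above; the namespace, label, and priority class values are placeholders.
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        podRestartThreshold: 100
        includingInitContainers: true
      nodeFit: true                                   # skip pods that would not fit on any other node
      thresholdPriorityClassName: "priorityclass1"    # only evict pods below this priority (placeholder class)
      namespaces:
        exclude:
          - "kube-system"                             # placeholder: never touch this namespace
      labelSelector:
        matchLabels:
          evictable: "true"                           # placeholder label
```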


@@ -4,25 +4,11 @@ Starting with descheduler release v0.10.0 container images are available in the
 Descheduler Version | Container Image | Architectures |
 ------------------- |-------------------------------------------------|-------------------------|
+v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
 v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
 v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
-v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.27.1 | registry.k8s.io/descheduler/descheduler:v0.27.1 | AMD64<br>ARM64<br>ARMv7 |
-v0.27.0 | registry.k8s.io/descheduler/descheduler:v0.27.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.26.1 | registry.k8s.io/descheduler/descheduler:v0.26.1 | AMD64<br>ARM64<br>ARMv7 |
-v0.26.0 | registry.k8s.io/descheduler/descheduler:v0.26.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.25.1 | registry.k8s.io/descheduler/descheduler:v0.25.1 | AMD64<br>ARM64<br>ARMv7 |
-v0.25.0 | registry.k8s.io/descheduler/descheduler:v0.25.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.24.1 | registry.k8s.io/descheduler/descheduler:v0.24.1 | AMD64<br>ARM64<br>ARMv7 |
-v0.24.0 | registry.k8s.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.23.1 | registry.k8s.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
-v0.22.0 | registry.k8s.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.21.0 | registry.k8s.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.20.0 | registry.k8s.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
-v0.19.0 | registry.k8s.io/descheduler/descheduler:v0.19.0 | AMD64 |
-v0.18.0 | registry.k8s.io/descheduler/descheduler:v0.18.0 | AMD64 |
-v0.10.0 | registry.k8s.io/descheduler/descheduler:v0.10.0 | AMD64 |
 Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
 starting with descheduler release v0.20.0 use the below process to download the official descheduler

go.mod

@@ -1,116 +1,131 @@
module sigs.k8s.io/descheduler module sigs.k8s.io/descheduler
go 1.22.2 go 1.23.3
require ( require (
github.com/client9/misspell v0.3.4 github.com/client9/misspell v0.3.4
github.com/google/go-cmp v0.6.0 github.com/google/go-cmp v0.6.0
github.com/spf13/cobra v1.8.0 github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5 github.com/spf13/pflag v1.0.5
go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
go.opentelemetry.io/otel/sdk v1.24.0 go.opentelemetry.io/otel/sdk v1.28.0
go.opentelemetry.io/otel/trace v1.24.0 go.opentelemetry.io/otel/trace v1.28.0
google.golang.org/grpc v1.62.0 google.golang.org/grpc v1.65.0
k8s.io/api v0.30.0 k8s.io/api v0.32.0
k8s.io/apimachinery v0.30.0 k8s.io/apimachinery v0.32.0
k8s.io/apiserver v0.30.0 k8s.io/apiserver v0.32.0
k8s.io/client-go v0.30.0 k8s.io/client-go v0.32.0
k8s.io/code-generator v0.30.0 k8s.io/code-generator v0.32.0
k8s.io/component-base v0.30.0 k8s.io/component-base v0.32.0
k8s.io/component-helpers v0.30.0 k8s.io/component-helpers v0.32.0
k8s.io/klog/v2 v2.120.1 k8s.io/klog/v2 v2.130.1
k8s.io/utils v0.0.0-20240310230437-4693a0247e57 k8s.io/metrics v0.32.0
k8s.io/utils v0.0.0-20241210054802-24370beab758
kubevirt.io/api v1.3.0
kubevirt.io/client-go v1.3.0
kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
sigs.k8s.io/mdtoc v1.1.0 sigs.k8s.io/mdtoc v1.1.0
sigs.k8s.io/yaml v1.4.0
) )
require ( require (
cel.dev/expr v0.18.0 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-kit/kit v0.13.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/glog v1.2.1 // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
github.com/google/cel-go v0.17.8 // indirect github.com/google/btree v1.0.1 // indirect
github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/openshift/custom-resource-status v1.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.10 // indirect github.com/x448/float16 v0.8.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect go.etcd.io/etcd/api/v3 v3.5.16 // indirect
go.etcd.io/etcd/client/v3 v3.5.10 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect go.etcd.io/etcd/client/v3 v3.5.16 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.21.0 // indirect golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.15.0 // indirect golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.23.0 // indirect golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.6.0 // indirect golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.18.0 // indirect golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.18.0 // indirect golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.14.0 // indirect golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.18.0 // indirect golang.org/x/tools v0.26.0 // indirect
google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/protobuf v1.35.1 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect k8s.io/apiextensions-apiserver v0.30.0 // indirect
k8s.io/kms v0.30.0 // indirect k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/kms v0.32.0 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect k8s.io/kube-openapi v0.30.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
) )
replace go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f
replace golang.org/x/net => golang.org/x/net v0.33.0
replace golang.org/x/crypto => golang.org/x/crypto v0.31.0

go.sum

@@ -1,96 +1,163 @@
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo= github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU= github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
@@ -99,20 +166,24 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -120,229 +191,433 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4= github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs= github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4=
github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0/go.mod h1:CQNu9bj7o7mC6U7+CA/schKEYakYXWr79ucDHTMGhCM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ= golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M= k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY= k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs=
k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y=
k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/code-generator v0.30.0 h1:3VUVqHvWFSVSm9kqL/G6kD4ZwNdHF6J/jPyo3Jgjy3k= k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
k8s.io/code-generator v0.30.0/go.mod h1:mBMZhfRR4IunJUh2+7LVmdcWwpouCH5+LNPkZ3t/v7Q= k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/component-base v0.30.0 h1:cj6bp38g0ainlfYtaOQuRELh5KSYjhKxM+io7AUIk4o= k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs=
k8s.io/component-base v0.30.0/go.mod h1:V9x/0ePFNaKeKYA3bOvIbrNoluTSG+fSJKjLdjOoeXQ= k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag=
k8s.io/component-helpers v0.30.0 h1:xbJtNCfSM4SB/Tz5JqCKDZv4eT5LVi/AWQ1VOxhmStU= k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
k8s.io/component-helpers v0.30.0/go.mod h1:68HlSwXIumMKmCx8cZe1PoafQEYh581/sEpxMrkhmX4= k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
k8s.io/code-generator v0.32.0 h1:s0lNN8VSWny8LBz5t5iy7MCdgwdOhdg7vAGVxvS+VWU=
k8s.io/code-generator v0.32.0/go.mod h1:b7Q7KMZkvsYFy72A79QYjiv4aTz3GvW0f1T3UfhFq4s=
k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU=
k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM=
k8s.io/component-helpers v0.32.0 h1:pQEEBmRt3pDJJX98cQvZshDgJFeKRM4YtYkMmfOlczw=
k8s.io/component-helpers v0.32.0/go.mod h1:9RuClQatbClcokXOcDWSzFKQm1huIf0FzQlPRpizlMc=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.30.0 h1:ZlnD/ei5lpvUlPw6eLfVvH7d8i9qZ6HwUQgydNVks8g= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/kms v0.30.0/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kms v0.32.0 h1:jwOfunHIrcdYl5FRcA+uUKKtg6qiqoPCwmS2T3XTYL4=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kms v0.32.0/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM=
k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= k8s.io/metrics v0.32.0 h1:70qJ3ZS/9DrtH0UA0NVBI6gW2ip2GAn9e7NtoKERpns=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= k8s.io/metrics v0.32.0/go.mod h1:skdg9pDjVjCPIQqmc5rBzDL4noY64ORhKu9KCPv1+QI=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
kubevirt.io/api v1.3.0 h1:9sGElMmnRU50pGED+MPPD2OwQl4S5lvjCUjm+t0mI90=
kubevirt.io/api v1.3.0/go.mod h1:e6LkElYZZm8NcP2gKlFVHZS9pgNhIARHIjSBSfeiP1s=
kubevirt.io/client-go v1.3.0 h1:/HKn4exzwsctEVTwVtEFaeT9D2v4TgWr2SmxITVEZ/4=
kubevirt.io/client-go v1.3.0/go.mod h1:qmcJZvUjbmggY1pp7irO3zesBJj7wwGIWAdnYEoh3yc=
kubevirt.io/containerized-data-importer-api v1.60.1 h1:chmxuINvA7TPmIe8LpShCoKPxoegcKjkG9tYboFBs/U=
kubevirt.io/containerized-data-importer-api v1.60.1/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs=
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc=
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo= sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w= sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

26
hack/lib/go.sh Normal file
View File

@@ -0,0 +1,26 @@
#!/bin/bash
# Copyright 2024 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# go::verify_version verifies that the installed Go version is supported by the project.
# The descheduler actively supports three Kubernetes releases, so three Go versions are supported.
go::verify_version() {
  GO_VERSION=($(go version))
  if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.21|go1.22|go1.23') ]]; then
    echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
    exit 1
  fi
}

View File

@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defa
 ${OS_OUTPUT_BINPATH}/defaulter-gen \
   --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
+  --extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha2" \
   --output-file zz_generated.defaults.go \
   $(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")

View File

@@ -20,13 +20,9 @@ set -o nounset
 set -o pipefail
 DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
+source "${DESCHEDULER_ROOT}/hack/lib/go.sh"
-GO_VERSION=($(go version))
-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21|go1.22') ]]; then
-  echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
-  exit 1
-fi
+go::verify_version
 cd "${DESCHEDULER_ROOT}"

View File

@@ -20,13 +20,9 @@ set -o nounset
 set -o pipefail
 DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
+source "${DESCHEDULER_ROOT}/hack/lib/go.sh"
-GO_VERSION=($(go version))
-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21|go1.22') ]]; then
-  echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
-  exit 1
-fi
+go::verify_version
 cd "${DESCHEDULER_ROOT}"

View File

@@ -70,7 +70,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
   ret=1
 fi
-if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
+if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" -x "README*" vendor "${_deschedulertmp}/vendor")"; then
   echo "Your vendored results are different:" >&2
   echo "${_out}" >&2
   echo "Vendor Verify failed." >&2

File diff suppressed because it is too large

View File

@@ -0,0 +1,16 @@
title: descheduler integration with evacuation API as an alternative to eviction API
kep-number: 1397
authors:
- "@ingvagabund"
owning-sig: sig-scheduling
participating-sigs:
- sig-apps
status: provisional
creation-date: 2024-04-14
reviewers:
- atiratree
approvers:
- TBD
feature-gates:
- TBD
stage: alpha

View File

@@ -22,13 +22,19 @@ rules:
 - apiGroups: ["scheduling.k8s.io"]
   resources: ["priorityclasses"]
   verbs: ["get", "watch", "list"]
+- apiGroups: ["policy"]
+  resources: ["poddisruptionbudgets"]
+  verbs: ["get", "watch", "list"]
 - apiGroups: ["coordination.k8s.io"]
   resources: ["leases"]
-  verbs: ["create"]
+  verbs: ["create", "update"]
 - apiGroups: ["coordination.k8s.io"]
   resources: ["leases"]
   resourceNames: ["descheduler"]
   verbs: ["get", "patch", "delete"]
+- apiGroups: ["metrics.k8s.io"]
+  resources: ["nodes", "pods"]
+  verbs: ["get", "list"]
 ---
 apiVersion: v1
 kind: ServiceAccount

View File

@@ -16,7 +16,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: descheduler
-          image: registry.k8s.io/descheduler/descheduler:v0.29.0
+          image: registry.k8s.io/descheduler/descheduler:v0.32.0
           volumeMounts:
             - mountPath: /policy-dir
               name: policy-volume

View File

@@ -19,7 +19,7 @@ spec:
       serviceAccountName: descheduler-sa
      containers:
         - name: descheduler
-          image: registry.k8s.io/descheduler/descheduler:v0.29.0
+          image: registry.k8s.io/descheduler/descheduler:v0.32.0
           imagePullPolicy: IfNotPresent
           command:
             - "/bin/descheduler"

View File

@@ -14,7 +14,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: descheduler
-          image: registry.k8s.io/descheduler/descheduler:v0.29.0
+          image: registry.k8s.io/descheduler/descheduler:v0.32.0
           volumeMounts:
             - mountPath: /policy-dir
               name: policy-volume

View File

@@ -38,13 +38,23 @@ type DeschedulerPolicy struct {
 	// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
 	MaxNoOfPodsToEvictPerNamespace *uint
+
+	// MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
+	MaxNoOfPodsToEvictTotal *uint
+
+	// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
+	// Default is false.
+	EvictionFailureEventNotification *bool
+
+	// MetricsCollector configures collection of metrics about actual resource utilization
+	MetricsCollector MetricsCollector
 }
 
 // Namespaces carries a list of included/excluded namespaces
 // for which a given strategy is applicable
 type Namespaces struct {
-	Include []string `json:"include"`
-	Exclude []string `json:"exclude"`
+	Include []string `json:"include,omitempty"`
+	Exclude []string `json:"exclude,omitempty"`
 }
 
 type (
@@ -81,3 +91,10 @@ type PluginSet struct {
 	Enabled  []string
 	Disabled []string
 }
+
+// MetricsCollector configures collection of metrics about actual resource utilization
+type MetricsCollector struct {
+	// Enabled enables metrics collection from the Kubernetes metrics server.
+	// Later, the collection can be extended to other providers.
+	Enabled bool
+}

View File

@@ -1,282 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)
var (
// pluginArgConversionScheme is a scheme with internal and v1alpha2 registered,
// used for defaulting/converting typed PluginConfig Args.
// Access via getPluginArgConversionScheme()
Scheme = runtime.NewScheme()
Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict)
)
// evictorImpl implements the Evictor interface so plugins
// can evict a pod without importing a specific pod evictor
type evictorImpl struct {
podEvictor *evictions.PodEvictor
evictorFilter frameworktypes.EvictorPlugin
}
var _ frameworktypes.Evictor = &evictorImpl{}
// Filter checks if a pod can be evicted
func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
return ei.evictorFilter.Filter(pod)
}
// PreEvictionFilter checks if pod can be evicted right before eviction
func (ei *evictorImpl) PreEvictionFilter(pod *v1.Pod) bool {
return ei.evictorFilter.PreEvictionFilter(pod)
}
// Evict evicts a pod (no pre-check performed)
func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
return ei.podEvictor.EvictPod(ctx, pod, opts)
}
func (ei *evictorImpl) NodeLimitExceeded(node *v1.Node) bool {
return ei.podEvictor.NodeLimitExceeded(node)
}
// handleImpl implements the framework handle which gets passed to plugins
type handleImpl struct {
clientSet clientset.Interface
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
evictor *evictorImpl
}
var _ frameworktypes.Handle = &handleImpl{}
// ClientSet retrieves kube client set
func (hi *handleImpl) ClientSet() clientset.Interface {
return hi.clientSet
}
// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
return hi.getPodsAssignedToNodeFunc
}
// SharedInformerFactory retrieves shared informer factory
func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
return hi.sharedInformerFactory
}
// Evictor retrieves evictor so plugins can filter and evict pods
func (hi *handleImpl) Evictor() frameworktypes.Evictor {
return hi.evictor
}
func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
klog.V(1).Info("Warning: v1alpha1 API is deprecated and will be removed in a future release. Use v1alpha2 API instead.")
err := V1alpha1ToInternal(in, pluginregistry.PluginRegistry, out, s)
if err != nil {
return err
}
return nil
}
func V1alpha1ToInternal(
deschedulerPolicy *DeschedulerPolicy,
registry pluginregistry.Registry,
out *api.DeschedulerPolicy,
s conversion.Scope,
) error {
var evictLocalStoragePods bool
if deschedulerPolicy.EvictLocalStoragePods != nil {
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
}
evictBarePods := false
if deschedulerPolicy.EvictFailedBarePods != nil {
evictBarePods = *deschedulerPolicy.EvictFailedBarePods
if evictBarePods {
klog.V(1).Info("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
}
}
evictSystemCriticalPods := false
if deschedulerPolicy.EvictSystemCriticalPods != nil {
evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
if evictSystemCriticalPods {
klog.V(1).Info("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
}
}
evictDaemonSetPods := false
if deschedulerPolicy.EvictDaemonSetPods != nil {
evictDaemonSetPods = *deschedulerPolicy.EvictDaemonSetPods
if evictDaemonSetPods {
klog.V(1).Info("Warning: EvictDaemonSetPods is set to True. This could cause eviction of Kubernetes DaemonSet pods.")
}
}
ignorePvcPods := false
if deschedulerPolicy.IgnorePVCPods != nil {
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
}
var profiles []api.DeschedulerProfile
// Build profiles
for name, strategy := range deschedulerPolicy.Strategies {
if _, ok := pluginregistry.PluginRegistry[string(name)]; ok {
if strategy.Enabled {
params := strategy.Params
if params == nil {
params = &StrategyParameters{}
}
nodeFit := false
if name != "PodLifeTime" {
nodeFit = params.NodeFit
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
klog.ErrorS(fmt.Errorf("priority threshold misconfigured"), "only one of priorityThreshold fields can be set", "pluginName", name)
return fmt.Errorf("priority threshold misconfigured for plugin %v", name)
}
var priorityThreshold *api.PriorityThreshold
if strategy.Params != nil {
priorityThreshold = &api.PriorityThreshold{
Value: strategy.Params.ThresholdPriority,
Name: strategy.Params.ThresholdPriorityClassName,
}
}
var pluginConfig *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[string(name)]; exists {
pluginConfig, err = pcFnc(params)
if err != nil {
klog.ErrorS(err, "skipping strategy", "strategy", name)
return fmt.Errorf("failed to get plugin config for strategy %v: %v", name, err)
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
return fmt.Errorf("unknown strategy name: %v", name)
}
profile := api.DeschedulerProfile{
Name: fmt.Sprintf("strategy-%v-profile", name),
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
EvictLocalStoragePods: evictLocalStoragePods,
EvictDaemonSetPods: evictDaemonSetPods,
EvictSystemCriticalPods: evictSystemCriticalPods,
IgnorePvcPods: ignorePvcPods,
EvictFailedBarePods: evictBarePods,
NodeFit: nodeFit,
PriorityThreshold: priorityThreshold,
},
},
*pluginConfig,
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
}
pluginArgs := registry[string(name)].PluginArgInstance
pluginInstance, err := registry[string(name)].PluginBuilder(pluginArgs, &handleImpl{})
if err != nil {
klog.ErrorS(fmt.Errorf("could not build plugin"), "plugin build error", "plugin", name)
return fmt.Errorf("could not build plugin: %v", name)
}
// pluginInstance can be of any of each type, or both
profilePlugins := profile.Plugins
profile.Plugins = enableProfilePluginsByType(profilePlugins, pluginInstance, pluginConfig)
profiles = append(profiles, profile)
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
return fmt.Errorf("unknown strategy name: %v", name)
}
}
out.Profiles = profiles
out.NodeSelector = deschedulerPolicy.NodeSelector
out.MaxNoOfPodsToEvictPerNamespace = deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace
out.MaxNoOfPodsToEvictPerNode = deschedulerPolicy.MaxNoOfPodsToEvictPerNode
return nil
}
func enableProfilePluginsByType(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
profilePlugins = checkBalance(profilePlugins, pluginInstance, pluginConfig)
profilePlugins = checkDeschedule(profilePlugins, pluginInstance, pluginConfig)
return profilePlugins
}
func checkBalance(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
_, ok := pluginInstance.(frameworktypes.BalancePlugin)
if ok {
klog.V(3).Infof("converting Balance plugin: %s", pluginInstance.Name())
profilePlugins.Balance.Enabled = []string{pluginConfig.Name}
}
return profilePlugins
}
func checkDeschedule(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
_, ok := pluginInstance.(frameworktypes.DeschedulePlugin)
if ok {
klog.V(3).Infof("converting Deschedule plugin: %s", pluginInstance.Name())
profilePlugins.Deschedule.Enabled = []string{pluginConfig.Name}
}
return profilePlugins
}
// Register Conversions
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
}); err != nil {
return err
}
return nil
}

View File

@@ -1,256 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)
// Once all strategies are migrated the arguments get read from the configuration file
// without any wiring. Keeping the wiring here so the descheduler can still use
// the v1alpha1 configuration during the strategy migration to plugins.
var StrategyParamsToPluginArgs = map[string]func(params *StrategyParameters) (*api.PluginConfig, error){
"RemovePodsViolatingNodeTaints": func(params *StrategyParameters) (*api.PluginConfig, error) {
args := &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
IncludePreferNoSchedule: params.IncludePreferNoSchedule,
ExcludedTaints: params.ExcludedTaints,
}
if err := removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodetaints.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingnodetaints.PluginName, err)
}
return &api.PluginConfig{
Name: removepodsviolatingnodetaints.PluginName,
Args: args,
}, nil
},
"RemoveFailedPods": func(params *StrategyParameters) (*api.PluginConfig, error) {
failedPodsParams := params.FailedPods
if failedPodsParams == nil {
failedPodsParams = &FailedPods{}
}
args := &removefailedpods.RemoveFailedPodsArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
IncludingInitContainers: failedPodsParams.IncludingInitContainers,
MinPodLifetimeSeconds: failedPodsParams.MinPodLifetimeSeconds,
ExcludeOwnerKinds: failedPodsParams.ExcludeOwnerKinds,
Reasons: failedPodsParams.Reasons,
}
if err := removefailedpods.ValidateRemoveFailedPodsArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removefailedpods.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removefailedpods.PluginName, err)
}
return &api.PluginConfig{
Name: removefailedpods.PluginName,
Args: args,
}, nil
},
"RemovePodsViolatingNodeAffinity": func(params *StrategyParameters) (*api.PluginConfig, error) {
args := &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
NodeAffinityType: params.NodeAffinityType,
}
if err := removepodsviolatingnodeaffinity.ValidateRemovePodsViolatingNodeAffinityArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodeaffinity.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingnodeaffinity.PluginName, err)
}
return &api.PluginConfig{
Name: removepodsviolatingnodeaffinity.PluginName,
Args: args,
}, nil
},
"RemovePodsViolatingInterPodAntiAffinity": func(params *StrategyParameters) (*api.PluginConfig, error) {
args := &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
}
if err := removepodsviolatinginterpodantiaffinity.ValidateRemovePodsViolatingInterPodAntiAffinityArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatinginterpodantiaffinity.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatinginterpodantiaffinity.PluginName, err)
}
return &api.PluginConfig{
Name: removepodsviolatinginterpodantiaffinity.PluginName,
Args: args,
}, nil
},
"RemovePodsHavingTooManyRestarts": func(params *StrategyParameters) (*api.PluginConfig, error) {
tooManyRestartsParams := params.PodsHavingTooManyRestarts
if tooManyRestartsParams == nil {
tooManyRestartsParams = &PodsHavingTooManyRestarts{}
}
args := &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
PodRestartThreshold: tooManyRestartsParams.PodRestartThreshold,
IncludingInitContainers: tooManyRestartsParams.IncludingInitContainers,
}
if err := removepodshavingtoomanyrestarts.ValidateRemovePodsHavingTooManyRestartsArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodshavingtoomanyrestarts.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodshavingtoomanyrestarts.PluginName, err)
}
return &api.PluginConfig{
Name: removepodshavingtoomanyrestarts.PluginName,
Args: args,
}, nil
},
"PodLifeTime": func(params *StrategyParameters) (*api.PluginConfig, error) {
podLifeTimeParams := params.PodLifeTime
if podLifeTimeParams == nil {
podLifeTimeParams = &PodLifeTime{}
}
var states []string
if podLifeTimeParams.PodStatusPhases != nil {
states = append(states, podLifeTimeParams.PodStatusPhases...)
}
if podLifeTimeParams.States != nil {
states = append(states, podLifeTimeParams.States...)
}
args := &podlifetime.PodLifeTimeArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
MaxPodLifeTimeSeconds: podLifeTimeParams.MaxPodLifeTimeSeconds,
States: states,
}
if err := podlifetime.ValidatePodLifeTimeArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", podlifetime.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", podlifetime.PluginName, err)
}
return &api.PluginConfig{
Name: podlifetime.PluginName,
Args: args,
}, nil
},
"RemoveDuplicates": func(params *StrategyParameters) (*api.PluginConfig, error) {
args := &removeduplicates.RemoveDuplicatesArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
}
if params.RemoveDuplicates != nil {
args.ExcludeOwnerKinds = params.RemoveDuplicates.ExcludeOwnerKinds
}
if err := removeduplicates.ValidateRemoveDuplicatesArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removeduplicates.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removeduplicates.PluginName, err)
}
return &api.PluginConfig{
Name: removeduplicates.PluginName,
Args: args,
}, nil
},
"RemovePodsViolatingTopologySpreadConstraint": func(params *StrategyParameters) (*api.PluginConfig, error) {
constraints := []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule}
if params.IncludeSoftConstraints {
constraints = append(constraints, v1.ScheduleAnyway)
}
args := &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
Constraints: constraints,
TopologyBalanceNodeFit: utilpointer.Bool(true),
}
if err := removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingtopologyspreadconstraint.PluginName, err)
}
return &api.PluginConfig{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: args,
}, nil
},
"HighNodeUtilization": func(params *StrategyParameters) (*api.PluginConfig, error) {
if params.NodeResourceUtilizationThresholds == nil {
params.NodeResourceUtilizationThresholds = &NodeResourceUtilizationThresholds{}
}
args := &nodeutilization.HighNodeUtilizationArgs{
EvictableNamespaces: v1alpha1NamespacesToInternal(params.Namespaces),
Thresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.Thresholds),
NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
}
if err := nodeutilization.ValidateHighNodeUtilizationArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", nodeutilization.HighNodeUtilizationPluginName, err)
}
return &api.PluginConfig{
Name: nodeutilization.HighNodeUtilizationPluginName,
Args: args,
}, nil
},
"LowNodeUtilization": func(params *StrategyParameters) (*api.PluginConfig, error) {
if params.NodeResourceUtilizationThresholds == nil {
params.NodeResourceUtilizationThresholds = &NodeResourceUtilizationThresholds{}
}
args := &nodeutilization.LowNodeUtilizationArgs{
EvictableNamespaces: v1alpha1NamespacesToInternal(params.Namespaces),
Thresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.Thresholds),
TargetThresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.TargetThresholds),
UseDeviationThresholds: params.NodeResourceUtilizationThresholds.UseDeviationThresholds,
NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
}
if err := nodeutilization.ValidateLowNodeUtilizationArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", nodeutilization.LowNodeUtilizationPluginName, err)
}
return &api.PluginConfig{
Name: nodeutilization.LowNodeUtilizationPluginName,
Args: args,
}, nil
},
}
func v1alpha1NamespacesToInternal(namespaces *Namespaces) *api.Namespaces {
internal := &api.Namespaces{}
if namespaces != nil {
if namespaces.Exclude != nil {
internal.Exclude = namespaces.Exclude
}
if namespaces.Include != nil {
internal.Include = namespaces.Include
}
} else {
internal = nil
}
return internal
}
func v1alpha1ThresholdToInternal(thresholds ResourceThresholds) api.ResourceThresholds {
internal := make(api.ResourceThresholds, len(thresholds))
for k, v := range thresholds {
internal[k] = api.Percentage(float64(v))
}
return internal
}

View File

@@ -1,859 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)
func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeTaints(t *testing.T) {
strategyName := "RemovePodsViolatingNodeTaints"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
ExcludedTaints: []string{
"dedicated=special-user",
"reserved",
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatingnodetaints.PluginName,
Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
ExcludedTaints: []string{"dedicated=special-user", "reserved"},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemoveFailedPods(t *testing.T) {
strategyName := "RemoveFailedPods"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
FailedPods: &FailedPods{
MinPodLifetimeSeconds: utilpointer.Uint(3600),
ExcludeOwnerKinds: []string{"Job"},
Reasons: []string{"NodeAffinity"},
IncludingInitContainers: true,
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removefailedpods.PluginName,
Args: &removefailedpods.RemoveFailedPodsArgs{
ExcludeOwnerKinds: []string{"Job"},
MinPodLifetimeSeconds: utilpointer.Uint(3600),
Reasons: []string{"NodeAffinity"},
IncludingInitContainers: true,
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeAffinity(t *testing.T) {
strategyName := "RemovePodsViolatingNodeAffinity"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatingnodeaffinity.PluginName,
Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params, not setting nodeaffinity type",
params: &StrategyParameters{},
err: fmt.Errorf("strategy \"%s\" param validation failed: nodeAffinityType needs to be set", strategyName),
result: nil,
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemovePodsViolatingInterPodAntiAffinity(t *testing.T) {
strategyName := "RemovePodsViolatingInterPodAntiAffinity"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatinginterpodantiaffinity.PluginName,
Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemovePodsHavingTooManyRestarts(t *testing.T) {
strategyName := "RemovePodsHavingTooManyRestarts"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
PodsHavingTooManyRestarts: &PodsHavingTooManyRestarts{
PodRestartThreshold: 100,
IncludingInitContainers: true,
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removepodshavingtoomanyrestarts.PluginName,
Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 100,
IncludingInitContainers: true,
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
{
description: "invalid params restart threshold",
params: &StrategyParameters{
PodsHavingTooManyRestarts: &PodsHavingTooManyRestarts{
PodRestartThreshold: 0,
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: invalid PodsHavingTooManyRestarts threshold", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsPodLifeTime(t *testing.T) {
strategyName := "PodLifeTime"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
PodLifeTime: &PodLifeTime{
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
States: []string{
"Pending",
"PodInitializing",
},
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: podlifetime.PluginName,
Args: &podlifetime.PodLifeTimeArgs{
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
States: []string{
"Pending",
"PodInitializing",
},
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
PodLifeTime: &PodLifeTime{
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
},
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
{
description: "invalid params MaxPodLifeTimeSeconds not set",
params: &StrategyParameters{
PodLifeTime: &PodLifeTime{},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: MaxPodLifeTimeSeconds not set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemoveDuplicates(t *testing.T) {
strategyName := "RemoveDuplicates"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
RemoveDuplicates: &RemoveDuplicates{
ExcludeOwnerKinds: []string{"ReplicaSet"},
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removeduplicates.PluginName,
Args: &removeduplicates.RemoveDuplicatesArgs{
ExcludeOwnerKinds: []string{"ReplicaSet"},
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
PodLifeTime: &PodLifeTime{
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
},
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t *testing.T) {
strategyName := "RemovePodsViolatingTopologySpreadConstraint"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
IncludeSoftConstraints: true,
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
TopologyBalanceNodeFit: utilpointer.Bool(true),
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "params without soft constraints",
params: &StrategyParameters{
IncludeSoftConstraints: false,
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsHighNodeUtilization(t *testing.T) {
strategyName := "HighNodeUtilization"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(20),
"memory": Percentage(20),
"pods": Percentage(20),
},
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: nodeutilization.HighNodeUtilizationPluginName,
Args: &nodeutilization.HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
"cpu": api.Percentage(20),
"memory": api.Percentage(20),
"pods": api.Percentage(20),
},
NumberOfNodes: 3,
EvictableNamespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(20),
"memory": Percentage(20),
"pods": Percentage(20),
},
},
Namespaces: &Namespaces{
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only Exclude namespaces can be set, inclusion is not supported", strategyName),
result: nil,
},
{
description: "invalid params nil ResourceThresholds",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: no resource threshold is configured", strategyName),
result: nil,
},
{
description: "invalid params out of bounds threshold",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(150),
},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: cpu threshold not in [0, 100] range", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsLowNodeUtilization(t *testing.T) {
strategyName := "LowNodeUtilization"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(20),
"memory": Percentage(20),
"pods": Percentage(20),
},
TargetThresholds: ResourceThresholds{
"cpu": Percentage(50),
"memory": Percentage(50),
"pods": Percentage(50),
},
UseDeviationThresholds: true,
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: nodeutilization.LowNodeUtilizationPluginName,
Args: &nodeutilization.LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
"cpu": api.Percentage(20),
"memory": api.Percentage(20),
"pods": api.Percentage(20),
},
TargetThresholds: api.ResourceThresholds{
"cpu": api.Percentage(50),
"memory": api.Percentage(50),
"pods": api.Percentage(50),
},
UseDeviationThresholds: true,
NumberOfNodes: 3,
EvictableNamespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(20),
"memory": Percentage(20),
"pods": Percentage(20),
},
TargetThresholds: ResourceThresholds{
"cpu": Percentage(50),
"memory": Percentage(50),
"pods": Percentage(50),
},
UseDeviationThresholds: true,
},
Namespaces: &Namespaces{
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only Exclude namespaces can be set, inclusion is not supported", strategyName),
result: nil,
},
{
description: "invalid params nil ResourceThresholds",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: thresholds config is not valid: no resource threshold is configured", strategyName),
result: nil,
},
{
description: "invalid params out of bounds threshold",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(150),
},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: thresholds config is not valid: cpu threshold not in [0, 100] range", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}

View File

@@ -1,133 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DeschedulerPolicy struct {
metav1.TypeMeta `json:",inline"`
// Strategies
Strategies StrategyList `json:"strategies,omitempty"`
// NodeSelector for a set of nodes to operate over
NodeSelector *string `json:"nodeSelector,omitempty"`
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`
// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
EvictDaemonSetPods *bool `json:"evictDaemonSetPods,omitempty"`
// IgnorePVCPods prevents pods with PVCs from being evicted.
IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}
type (
StrategyName string
StrategyList map[StrategyName]DeschedulerStrategy
)
type DeschedulerStrategy struct {
// Enabled or disabled
Enabled bool `json:"enabled,omitempty"`
// Weight
Weight int `json:"weight,omitempty"`
// Strategy parameters
Params *StrategyParameters `json:"params,omitempty"`
}
// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable.
type Namespaces struct {
Include []string `json:"include"`
Exclude []string `json:"exclude"`
}
// Besides Namespaces ThresholdPriority and ThresholdPriorityClassName only one of its members may be specified
type StrategyParameters struct {
NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
FailedPods *FailedPods `json:"failedPods,omitempty"`
IncludeSoftConstraints bool `json:"includeSoftConstraints"`
Namespaces *Namespaces `json:"namespaces"`
ThresholdPriority *int32 `json:"thresholdPriority"`
ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
NodeFit bool `json:"nodeFit"`
IncludePreferNoSchedule bool `json:"includePreferNoSchedule"`
ExcludedTaints []string `json:"excludedTaints,omitempty"`
IncludedTaints []string `json:"includedTaints,omitempty"`
}
type (
Percentage float64
ResourceThresholds map[v1.ResourceName]Percentage
)
type NodeResourceUtilizationThresholds struct {
UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
Thresholds ResourceThresholds `json:"thresholds,omitempty"`
TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
NumberOfNodes int `json:"numberOfNodes,omitempty"`
}
type PodsHavingTooManyRestarts struct {
PodRestartThreshold int32 `json:"podRestartThreshold,omitempty"`
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
}
type RemoveDuplicates struct {
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
}
type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
States []string `json:"states,omitempty"`
// Deprecated: Use States instead.
PodStatusPhases []string `json:"podStatusPhases,omitempty"`
}
type FailedPods struct {
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds,omitempty"`
Reasons []string `json:"reasons,omitempty"`
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
}

View File

@@ -1,390 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Strategies != nil {
in, out := &in.Strategies, &out.Strategies
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(string)
**out = **in
}
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool)
**out = **in
}
if in.EvictSystemCriticalPods != nil {
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
*out = new(bool)
**out = **in
}
if in.EvictDaemonSetPods != nil {
in, out := &in.EvictDaemonSetPods, &out.EvictDaemonSetPods
*out = new(bool)
**out = **in
}
if in.IgnorePVCPods != nil {
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
*out = new(bool)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
if in == nil {
return nil
}
out := new(DeschedulerPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
*out = *in
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = new(StrategyParameters)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
if in == nil {
return nil
}
out := new(DeschedulerStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Exclude != nil {
in, out := &in.Exclude, &out.Exclude
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
func (in *Namespaces) DeepCopy() *Namespaces {
if in == nil {
return nil
}
out := new(Namespaces)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
*out = *in
if in.Thresholds != nil {
in, out := &in.Thresholds, &out.Thresholds
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.TargetThresholds != nil {
in, out := &in.TargetThresholds, &out.TargetThresholds
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
if in == nil {
return nil
}
out := new(NodeResourceUtilizationThresholds)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
*out = *in
if in.MaxPodLifeTimeSeconds != nil {
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
*out = new(uint)
**out = **in
}
if in.States != nil {
in, out := &in.States, &out.States
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodStatusPhases != nil {
in, out := &in.PodStatusPhases, &out.PodStatusPhases
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
if in == nil {
return nil
}
out := new(PodLifeTime)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
if in == nil {
return nil
}
out := new(PodsHavingTooManyRestarts)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
if in == nil {
return nil
}
out := new(RemoveDuplicates)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
{
in := &in
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
if in == nil {
return nil
}
out := new(ResourceThresholds)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in StrategyList) DeepCopyInto(out *StrategyList) {
{
in := &in
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
func (in StrategyList) DeepCopy() StrategyList {
if in == nil {
return nil
}
out := new(StrategyList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = *in
if in.NodeResourceUtilizationThresholds != nil {
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
*out = new(NodeResourceUtilizationThresholds)
(*in).DeepCopyInto(*out)
}
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodsHavingTooManyRestarts != nil {
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
*out = new(PodsHavingTooManyRestarts)
**out = **in
}
if in.PodLifeTime != nil {
in, out := &in.PodLifeTime, &out.PodLifeTime
*out = new(PodLifeTime)
(*in).DeepCopyInto(*out)
}
if in.RemoveDuplicates != nil {
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
*out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out)
}
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces)
(*in).DeepCopyInto(*out)
}
if in.ThresholdPriority != nil {
in, out := &in.ThresholdPriority, &out.ThresholdPriority
*out = new(int32)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludedTaints != nil {
in, out := &in.ExcludedTaints, &out.ExcludedTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.IncludedTaints != nil {
in, out := &in.IncludedTaints, &out.IncludedTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
if in == nil {
return nil
}
out := new(StrategyParameters)
in.DeepCopyInto(out)
return out
}

View File

@@ -37,6 +37,16 @@ type DeschedulerPolicy struct {
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
// MaxNoOfPodsToTotal restricts maximum of pods to be evicted total.
MaxNoOfPodsToEvictTotal *uint `json:"maxNoOfPodsToEvictTotal,omitempty"`
// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
// Default is false.
EvictionFailureEventNotification *bool
// MetricsCollector configures collection of metrics for actual resource utilization
MetricsCollector MetricsCollector `json:"metricsCollector,omitempty"`
}
type DeschedulerProfile struct {
@@ -63,3 +73,10 @@ type PluginSet struct {
Enabled []string `json:"enabled"`
Disabled []string `json:"disabled"`
}
// MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct {
// Enabled metrics collection from kubernetes metrics.
// Later, the collection can be extended to other providers.
Enabled bool `json:"enabled,omitempty"`
}
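
For illustration only, a minimal sketch of how the three new policy knobs above might be populated from Go code. It assumes the internal sigs.k8s.io/descheduler/pkg/api package and the generic To helper from k8s.io/utils/ptr (aliased as ptr), both of which also appear elsewhere in this change; the values are made up:

// Hypothetical example; field names are taken from the diff above.
policy := &api.DeschedulerPolicy{
	// overall cap on the number of pods evicted (MaxNoOfPodsToEvictTotal above)
	MaxNoOfPodsToEvictTotal: ptr.To[uint](50),
	// emit an event when an eviction fails; defaults to false
	EvictionFailureEventNotification: ptr.To(true),
	// collect actual resource utilization from the metrics API
	MetricsCollector: api.MetricsCollector{Enabled: true},
}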

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -46,6 +46,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*MetricsCollector)(nil), (*api.MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(a.(*MetricsCollector), b.(*api.MetricsCollector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.MetricsCollector)(nil), (*MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(a.(*api.MetricsCollector), b.(*MetricsCollector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.PluginConfig)(nil), (*PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_PluginConfig_To_v1alpha2_PluginConfig(a.(*api.PluginConfig), b.(*PluginConfig), scope)
}); err != nil {
@@ -104,6 +114,11 @@ func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
if err := Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(&in.MetricsCollector, &out.MetricsCollector, s); err != nil {
return err
}
return nil
}
@@ -122,6 +137,11 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.Des
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
if err := Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(&in.MetricsCollector, &out.MetricsCollector, s); err != nil {
return err
}
return nil
}
@@ -173,6 +193,26 @@ func Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.Desch
return autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in, out, s)
}
func autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}
// Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector is an autogenerated conversion function.
func Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
return autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in, out, s)
}
func autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}
// Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector is an autogenerated conversion function.
func Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
return autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in, out, s)
}
func autoConvert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -51,6 +51,17 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictTotal != nil {
in, out := &in.MaxNoOfPodsToEvictTotal, &out.MaxNoOfPodsToEvictTotal
*out = new(uint)
**out = **in
}
if in.EvictionFailureEventNotification != nil {
in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
*out = new(bool)
**out = **in
}
out.MetricsCollector = in.MetricsCollector
return
}
@@ -96,6 +107,22 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
if in == nil {
return nil
}
out := new(MetricsCollector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
*out = *in

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -51,6 +51,17 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictTotal != nil {
in, out := &in.MaxNoOfPodsToEvictTotal, &out.MaxNoOfPodsToEvictTotal
*out = new(uint)
**out = **in
}
if in.EvictionFailureEventNotification != nil {
in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
*out = new(bool)
**out = **in
}
out.MetricsCollector = in.MetricsCollector
return
}
@@ -96,6 +107,22 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
if in == nil {
return nil
}
out := new(MetricsCollector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -19,16 +19,16 @@ package client
import (
"fmt"
-clientset "k8s.io/client-go/kubernetes"
-componentbaseconfig "k8s.io/component-base/config"
// Ensure to load all auth plugins.
+clientset "k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
+componentbaseconfig "k8s.io/component-base/config"
+metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)
-func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
+func createConfig(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (*rest.Config, error) {
var cfg *rest.Config
if len(clientConnection.Kubeconfig) != 0 {
master, err := GetMasterFromKubeconfig(clientConnection.Kubeconfig)
@@ -56,9 +56,28 @@ func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfigura
cfg = rest.AddUserAgent(cfg, userAgt)
}
return cfg, nil
}
func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
cfg, err := createConfig(clientConnection, userAgt)
if err != nil {
return nil, fmt.Errorf("unable to create config: %v", err)
}
return clientset.NewForConfig(cfg)
}
func CreateMetricsClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (metricsclient.Interface, error) {
cfg, err := createConfig(clientConnection, userAgt)
if err != nil {
return nil, fmt.Errorf("unable to create config: %v", err)
}
// Create the metrics clientset to access the metrics.k8s.io API
return metricsclient.NewForConfig(cfg)
}
func GetMasterFromKubeconfig(filename string) (string, error) {
config, err := clientcmd.LoadFromFile(filename)
if err != nil {
@@ -67,11 +86,11 @@ func GetMasterFromKubeconfig(filename string) (string, error) {
context, ok := config.Contexts[config.CurrentContext]
if !ok {
-return "", fmt.Errorf("failed to get master address from kubeconfig")
+return "", fmt.Errorf("failed to get master address from kubeconfig: current context not found")
}
if val, ok := config.Clusters[context.Cluster]; ok {
return val.Server, nil
}
-return "", fmt.Errorf("failed to get master address from kubeconfig")
+return "", fmt.Errorf("failed to get master address from kubeconfig: cluster information not found")
}
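
A hedged usage sketch for the refactored helpers above: CreateClient and the new CreateMetricsClient share createConfig, so both take the same ClientConnectionConfiguration. The wrapper function, kubeconfig path, and error handling are illustrative only; the package aliases (client, clientset, metricsclient, componentbaseconfig) match the imports shown in this diff:

func buildClients(kubeconfig string) (clientset.Interface, metricsclient.Interface, error) {
	conn := componentbaseconfig.ClientConnectionConfiguration{Kubeconfig: kubeconfig}
	kubeClient, err := client.CreateClient(conn, "descheduler")
	if err != nil {
		return nil, nil, err
	}
	// Only needed when the policy enables the metrics collector.
	metricsClient, err := client.CreateMetricsClient(conn, "descheduler")
	if err != nil {
		return nil, nil, err
	}
	return kubeClient, metricsClient, nil
}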

View File

@@ -18,48 +18,48 @@ package descheduler
import (
"context"
-"errors"
"fmt"
"math"
+"strconv"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
policyv1 "k8s.io/api/policy/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery" "k8s.io/client-go/discovery"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/events" "k8s.io/client-go/tools/events"
componentbaseconfig "k8s.io/component-base/config" componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog/v2" "k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
listersv1 "k8s.io/client-go/listers/core/v1"
schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
"sigs.k8s.io/descheduler/pkg/tracing"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/pkg/version"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/metrics" "sigs.k8s.io/descheduler/metrics"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry" "sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile" frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types" frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/tracing"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/pkg/version"
)
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
@@ -70,41 +70,123 @@ type profileRunner struct {
}
type descheduler struct {
rs *options.DeschedulerServer
-podLister listersv1.PodLister
-nodeLister listersv1.NodeLister
-namespaceLister listersv1.NamespaceLister
-priorityClassLister schedulingv1.PriorityClassLister
-getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
-sharedInformerFactory informers.SharedInformerFactory
-evictionPolicyGroupVersion string
-deschedulerPolicy *api.DeschedulerPolicy
-eventRecorder events.EventRecorder
+ir *informerResources
+getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
+sharedInformerFactory informers.SharedInformerFactory
+deschedulerPolicy *api.DeschedulerPolicy
+eventRecorder events.EventRecorder
+podEvictor *evictions.PodEvictor
+podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
+metricsCollector *metricscollector.MetricsCollector
}
-func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
+type informerResources struct {
sharedInformerFactory informers.SharedInformerFactory
resourceToInformer map[schema.GroupVersionResource]informers.GenericInformer
}
func newInformerResources(sharedInformerFactory informers.SharedInformerFactory) *informerResources {
return &informerResources{
sharedInformerFactory: sharedInformerFactory,
resourceToInformer: make(map[schema.GroupVersionResource]informers.GenericInformer),
}
}
func (ir *informerResources) Uses(resources ...schema.GroupVersionResource) error {
for _, resource := range resources {
informer, err := ir.sharedInformerFactory.ForResource(resource)
if err != nil {
return err
}
ir.resourceToInformer[resource] = informer
}
return nil
}
// CopyTo Copy informer subscriptions to the new factory and objects to the fake client so that the backing caches are populated for when listers are used.
func (ir *informerResources) CopyTo(fakeClient *fakeclientset.Clientset, newFactory informers.SharedInformerFactory) error {
for resource, informer := range ir.resourceToInformer {
_, err := newFactory.ForResource(resource)
if err != nil {
return fmt.Errorf("error getting resource %s: %w", resource, err)
}
objects, err := informer.Lister().List(labels.Everything())
if err != nil {
return fmt.Errorf("error listing %s: %w", informer, err)
}
for _, object := range objects {
fakeClient.Tracker().Add(object)
}
}
return nil
}
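
Taken together, newInformerResources, Uses, and CopyTo let the dry-run path rebuild its lister caches against a fake client instead of hand-copying each resource type. A rough fragment of the intended flow, with names taken from the surrounding diff and error handling reduced to plain returns:

// Subscribe to the resources the plugins are known to need.
ir := newInformerResources(sharedInformerFactory)
if err := ir.Uses(v1.SchemeGroupVersion.WithResource("pods"), v1.SchemeGroupVersion.WithResource("nodes")); err != nil {
	return err
}
// Later, for a dry run: mirror the cached objects into a fake client and a fresh factory.
fakeClient := fakeclientset.NewSimpleClientset()
fakeFactory := informers.NewSharedInformerFactory(fakeClient, 0)
if err := ir.CopyTo(fakeClient, fakeFactory); err != nil {
	return err
}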
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory,
) (*descheduler, error) {
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
-podLister := sharedInformerFactory.Core().V1().Pods().Lister()
-nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
-namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
-priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
+ir := newInformerResources(sharedInformerFactory)
+ir.Uses(v1.SchemeGroupVersion.WithResource("pods"),
+v1.SchemeGroupVersion.WithResource("nodes"),
// Future work could be to let each plugin declare what type of resources it needs; that way dry runs would stay
// consistent with the real runs without having to keep the list here in sync.
v1.SchemeGroupVersion.WithResource("namespaces"), // Used by the defaultevictor plugin
schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"), // Used by the defaultevictor plugin
policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin
) // Used by the defaultevictor plugin
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
}
podEvictor, err := evictions.NewPodEvictor(
ctx,
rs.Client,
eventRecorder,
podInformer,
rs.DefaultFeatureGates,
evictions.NewOptions().
WithPolicyGroupVersion(evictionPolicyGroupVersion).
WithMaxPodsToEvictPerNode(deschedulerPolicy.MaxNoOfPodsToEvictPerNode).
WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
WithMaxPodsToEvictTotal(deschedulerPolicy.MaxNoOfPodsToEvictTotal).
WithEvictionFailureEventNotification(deschedulerPolicy.EvictionFailureEventNotification).
WithDryRun(rs.DryRun).
WithMetricsEnabled(!rs.DisableMetrics),
)
if err != nil {
return nil, err
}
var metricsCollector *metricscollector.MetricsCollector
if deschedulerPolicy.MetricsCollector.Enabled {
nodeSelector := labels.Everything()
if deschedulerPolicy.NodeSelector != nil {
sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
if err != nil {
return nil, err
}
nodeSelector = sel
}
metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
}
return &descheduler{
rs: rs,
-podLister: podLister,
-nodeLister: nodeLister,
-namespaceLister: namespaceLister,
-priorityClassLister: priorityClassLister,
-getPodsAssignedToNode: getPodsAssignedToNode,
-sharedInformerFactory: sharedInformerFactory,
-evictionPolicyGroupVersion: evictionPolicyGroupVersion,
-deschedulerPolicy: deschedulerPolicy,
-eventRecorder: eventRecorder,
+ir: ir,
+getPodsAssignedToNode: getPodsAssignedToNode,
+sharedInformerFactory: sharedInformerFactory,
+deschedulerPolicy: deschedulerPolicy,
+eventRecorder: eventRecorder,
+podEvictor: podEvictor,
+podEvictionReactionFnc: podEvictionReactionFnc,
+metricsCollector: metricsCollector,
}, nil
}
@@ -129,13 +211,17 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
if d.rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
-fakeClient, err := cachedClient(d.rs.Client, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
+fakeClient := fakeclientset.NewSimpleClientset()
+// simulate a pod eviction by deleting a pod
+fakeClient.PrependReactor("create", "pods", d.podEvictionReactionFnc(fakeClient))
+fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+err := d.ir.CopyTo(fakeClient, fakeSharedInformerFactory)
if err != nil {
return err
}
// create a new instance of the shared informer factor from the cached client
-fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
// register the pod informer, otherwise it will not get running
d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
if err != nil {
@@ -153,21 +239,13 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
client = d.rs.Client
}
-klog.V(3).Infof("Building a pod evictor")
-podEvictor := evictions.NewPodEvictor(
-client,
-d.evictionPolicyGroupVersion,
-d.rs.DryRun,
-d.deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
-d.deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
-nodes,
-!d.rs.DisableMetrics,
-d.eventRecorder,
-)
-d.runProfiles(ctx, client, nodes, podEvictor)
-klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
+klog.V(3).Infof("Setting up the pod evictor")
+d.podEvictor.SetClient(client)
+d.podEvictor.ResetCounters()
+d.runProfiles(ctx, client, nodes)
+klog.V(1).InfoS("Number of evictions/requests", "totalEvicted", d.podEvictor.TotalEvicted(), "evictionRequests", d.podEvictor.TotalEvictionRequests())
return nil
}
@@ -175,7 +253,7 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
// runProfiles runs all the deschedule plugins of all profiles and
// later runs through all balance plugins of all profiles. (All Balance plugins should come after all Deschedule plugins)
// see https://github.com/kubernetes-sigs/descheduler/issues/979
-func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node) {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
defer span.End()
@@ -186,8 +264,9 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
pluginregistry.PluginRegistry,
frameworkprofile.WithClientSet(client),
frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
-frameworkprofile.WithPodEvictor(podEvictor),
+frameworkprofile.WithPodEvictor(d.podEvictor),
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
+frameworkprofile.WithMetricsCollector(d.metricsCollector),
)
if err != nil {
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
@@ -252,6 +331,14 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return err
}
if deschedulerPolicy.MetricsCollector.Enabled {
metricsClient, err := client.CreateMetricsClient(clientConnection, "descheduler")
if err != nil {
return err
}
rs.MetricsClient = metricsClient
}
runFn := func() error {
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
}
@@ -276,46 +363,38 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return runFn()
}
-func validateVersionCompatibility(discovery discovery.DiscoveryInterface, versionInfo version.Info) error {
+func validateVersionCompatibility(discovery discovery.DiscoveryInterface, deschedulerVersionInfo version.Info) error {
-serverVersionInfo, err := discovery.ServerVersion()
+kubeServerVersionInfo, err := discovery.ServerVersion()
if err != nil {
-return errors.New("failed to discover Kubernetes server version")
+return fmt.Errorf("failed to discover Kubernetes server version: %v", err)
}
-serverVersion, err := utilversion.ParseSemantic(serverVersionInfo.String())
+kubeServerVersion, err := utilversion.ParseSemantic(kubeServerVersionInfo.String())
if err != nil {
-return errors.New("failed to parse Kubernetes server version")
+return fmt.Errorf("failed to parse Kubernetes server version '%s': %v", kubeServerVersionInfo.String(), err)
}
-deschedulerVersion, err := utilversion.ParseGeneric(versionInfo.GitVersion)
+deschedulerMinor, err := strconv.ParseFloat(deschedulerVersionInfo.Minor, 64)
if err != nil {
-return errors.New("failed to convert Descheduler minor version to float")
+return fmt.Errorf("failed to convert Descheduler minor version '%s' to float: %v", deschedulerVersionInfo.Minor, err)
}
-deschedulerMinor := float64(deschedulerVersion.Minor())
-serverMinor := float64(serverVersion.Minor())
-if math.Abs(deschedulerMinor-serverMinor) > 3 {
+kubeServerMinor := float64(kubeServerVersion.Minor())
+if math.Abs(deschedulerMinor-kubeServerMinor) > 3 {
return fmt.Errorf(
-"descheduler version %v may not be supported on your version of Kubernetes %v."+
+"descheduler version %s.%s may not be supported on your version of Kubernetes %v."+
"See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix",
-deschedulerVersion.String(),
-serverVersionInfo.String(),
+deschedulerVersionInfo.Major,
+deschedulerVersionInfo.Minor,
+kubeServerVersionInfo.String(),
)
}
return nil
}
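
In practice the rewritten check compares minor versions with a skew budget of three: descheduler 0.23 against Kubernetes v1.26.1 gives |23 - 26| = 3 and passes, while 0.22 against v1.26.1 gives |22 - 26| = 4 and returns the compatibility error. The test cases further down in this diff exercise exactly these boundaries.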
-func cachedClient(
-realClient clientset.Interface,
-podLister listersv1.PodLister,
-nodeLister listersv1.NodeLister,
-namespaceLister listersv1.NamespaceLister,
-priorityClassLister schedulingv1.PriorityClassLister,
-) (clientset.Interface, error) {
-fakeClient := fakeclientset.NewSimpleClientset()
-// simulate a pod eviction by deleting a pod
-fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
+func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
+return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
@@ -332,54 +411,7 @@ func cachedClient(
}
// fallback to the default reactor
return false, nil, nil
-})
klog.V(3).Infof("Pulling resources for the cached client from the cluster")
pods, err := podLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list pods: %v", err)
} }
for _, item := range pods {
if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy pod: %v", err)
}
}
nodes, err := nodeLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list nodes: %v", err)
}
for _, item := range nodes {
if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
namespaces, err := namespaceLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list namespaces: %v", err)
}
for _, item := range namespaces {
if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy namespace: %v", err)
}
}
priorityClasses, err := priorityClassLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
}
for _, item := range priorityClasses {
if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
}
}
return fakeClient, nil
}
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
@@ -388,7 +420,6 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
defer span.End()
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
-nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {
@@ -414,12 +445,28 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
descheduler.podEvictor.WaitForEventHandlersSync(ctx)
if deschedulerPolicy.MetricsCollector.Enabled {
go func() {
klog.V(2).Infof("Starting metrics collector")
descheduler.metricsCollector.Run(ctx)
klog.V(2).Infof("Stopped metrics collector")
}()
klog.V(2).Infof("Waiting for metrics collector to sync")
if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(context.Context) (done bool, err error) {
return descheduler.metricsCollector.HasSynced(), nil
}); err != nil {
return fmt.Errorf("unable to wait for metrics collector to sync: %v", err)
}
}
wait.NonSlidingUntil(func() {
// A next context is created here intentionally to avoid nesting the spans via context.
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
defer sSpan.End()
-nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, nodeLister, nodeSelector)
+nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.sharedInformerFactory.Core().V1().Nodes().Lister(), nodeSelector)
if err != nil {
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)

View File

@@ -2,69 +2,180 @@ package descheduler
import (
"context"
"errors"
"fmt" "fmt"
"math/rand"
"net/http"
"testing" "testing"
"time" "time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1" policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
apiversion "k8s.io/apimachinery/pkg/version" apiversion "k8s.io/apimachinery/pkg/version"
fakediscovery "k8s.io/client-go/discovery/fake" fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/informers"
fakeclientset "k8s.io/client-go/kubernetes/fake" fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/api/v1alpha1" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/features"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry" "sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor" "sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates" "sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints" "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
"sigs.k8s.io/descheduler/pkg/utils"
deschedulerversion "sigs.k8s.io/descheduler/pkg/version" deschedulerversion "sigs.k8s.io/descheduler/pkg/version"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
// scope contains information about an ongoing conversion. var (
type scope struct { podEvictionError = errors.New("PodEvictionError")
converter *conversion.Converter tooManyRequestsError = &apierrors.StatusError{
meta *conversion.Meta ErrStatus: metav1.Status{
} Status: metav1.StatusFailure,
Code: http.StatusTooManyRequests,
// Convert continues a conversion. Reason: metav1.StatusReasonTooManyRequests,
func (s scope) Convert(src, dest interface{}) error { Message: "admission webhook \"virt-launcher-eviction-interceptor.kubevirt.io\" denied the request: Eviction triggered evacuation of VMI",
return s.converter.Convert(src, dest, s.meta) },
}
// Meta returns the meta object that was originally passed to Convert.
func (s scope) Meta() *conversion.Meta {
return s.meta
}
func TestTaintsUpdated(t *testing.T) {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
ctx := context.Background()
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
{},
} }
nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
podsgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
)
client := fakeclientset.NewSimpleClientset(n1, n2, p1) func initFeatureGates() featuregate.FeatureGate {
eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1) featureGates := featuregate.NewFeatureGate()
dp := &v1alpha1.DeschedulerPolicy{ featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
Strategies: v1alpha1.StrategyList{ features.EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
"RemovePodsViolatingNodeTaints": v1alpha1.DeschedulerStrategy{ })
Enabled: true, return featureGates
}
func initPluginRegistry() {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(nodeutilization.LowNodeUtilizationPluginName, nodeutilization.NewLowNodeUtilization, &nodeutilization.LowNodeUtilization{}, &nodeutilization.LowNodeUtilizationArgs{}, nodeutilization.ValidateLowNodeUtilizationArgs, nodeutilization.SetDefaults_LowNodeUtilizationArgs, pluginregistry.PluginRegistry)
}
func removePodsViolatingNodeTaintsPolicy() *api.DeschedulerPolicy {
return &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: "Profile",
PluginConfigs: []api.PluginConfig{
{
Name: "RemovePodsViolatingNodeTaints",
Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{},
},
{
Name: "DefaultEvictor",
Args: &defaultevictor.DefaultEvictorArgs{},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{
"DefaultEvictor",
},
},
Deschedule: api.PluginSet{
Enabled: []string{
"RemovePodsViolatingNodeTaints",
},
},
},
},
},
}
}
func removeDuplicatesPolicy() *api.DeschedulerPolicy {
return &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: "Profile",
PluginConfigs: []api.PluginConfig{
{
Name: "RemoveDuplicates",
Args: &removeduplicates.RemoveDuplicatesArgs{},
},
{
Name: "DefaultEvictor",
Args: &defaultevictor.DefaultEvictorArgs{},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{
"DefaultEvictor",
},
},
Balance: api.PluginSet{
Enabled: []string{
"RemoveDuplicates",
},
},
},
},
},
}
}
func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThresholds, metricsEnabled bool) *api.DeschedulerPolicy {
return &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: "Profile",
PluginConfigs: []api.PluginConfig{
{
Name: nodeutilization.LowNodeUtilizationPluginName,
Args: &nodeutilization.LowNodeUtilizationArgs{
Thresholds: thresholds,
TargetThresholds: targetThresholds,
MetricsUtilization: nodeutilization.MetricsUtilization{
MetricsServer: metricsEnabled,
},
},
},
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{
defaultevictor.PluginName,
},
},
Balance: api.PluginSet{
Enabled: []string{
nodeutilization.LowNodeUtilizationPluginName,
},
},
},
},
},
}
}
func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
client := fakeclientset.NewSimpleClientset(objects...)
eventClient := fakeclientset.NewSimpleClientset(objects...)
rs, err := options.NewDeschedulerServer()
if err != nil {
@@ -72,6 +183,44 @@ func TestTaintsUpdated(t *testing.T) {
}
rs.Client = client
rs.EventClient = eventClient
rs.DefaultFeatureGates = featureGates
rs.MetricsClient = metricsClient
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
descheduler, err := newDescheduler(ctx, rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory)
if err != nil {
eventBroadcaster.Shutdown()
t.Fatalf("Unable to create a descheduler instance: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
return rs, descheduler, client
}
func TestTaintsUpdated(t *testing.T) {
initPluginRegistry()
ctx := context.Background()
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
rs.DefaultFeatureGates = initFeatureGates()
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
@@ -95,16 +244,9 @@ func TestTaintsUpdated(t *testing.T) {
}
var evictedPods []string
-client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
+client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
-internalDeschedulerPolicy := &api.DeschedulerPolicy{}
-scope := scope{}
-err = v1alpha1.V1alpha1ToInternal(dp, pluginregistry.PluginRegistry, internalDeschedulerPolicy, scope)
-if err != nil {
-t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
-}
-if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
+if err := RunDeschedulerStrategies(ctx, rs, removePodsViolatingNodeTaintsPolicy(), "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
@@ -114,9 +256,7 @@ func TestTaintsUpdated(t *testing.T) {
}
func TestDuplicate(t *testing.T) {
-pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
-pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
-pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
+initPluginRegistry()
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
@@ -136,13 +276,6 @@ func TestDuplicate(t *testing.T) {
client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
dp := &v1alpha1.DeschedulerPolicy{
Strategies: v1alpha1.StrategyList{
"RemoveDuplicates": v1alpha1.DeschedulerStrategy{
Enabled: true,
},
},
}
rs, err := options.NewDeschedulerServer()
if err != nil {
@@ -150,6 +283,7 @@ func TestDuplicate(t *testing.T) {
}
rs.Client = client
rs.EventClient = eventClient
rs.DefaultFeatureGates = initFeatureGates()
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
@@ -161,20 +295,14 @@ func TestDuplicate(t *testing.T) {
}
var evictedPods []string
-client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
+client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
-internalDeschedulerPolicy := &api.DeschedulerPolicy{}
-scope := scope{}
-err = v1alpha1.V1alpha1ToInternal(dp, pluginregistry.PluginRegistry, internalDeschedulerPolicy, scope)
-if err != nil {
-t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
-}
-if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
+if err := RunDeschedulerStrategies(ctx, rs, removeDuplicatesPolicy(), "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
if len(evictedPods) == 0 {
-t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
+t.Fatalf("Unable to evict pods\n")
}
}
@@ -195,6 +323,7 @@ func TestRootCancel(t *testing.T) {
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 100 * time.Millisecond
rs.DefaultFeatureGates = initFeatureGates()
errChan := make(chan error, 1)
defer close(errChan)
@@ -230,6 +359,7 @@ func TestRootCancelWithNoInterval(t *testing.T) {
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 0
rs.DefaultFeatureGates = initFeatureGates()
errChan := make(chan error, 1)
defer close(errChan)
@@ -251,44 +381,44 @@ func TestRootCancelWithNoInterval(t *testing.T) {
func TestValidateVersionCompatibility(t *testing.T) {
type testCase struct {
name string
-deschedulerVersion string
+deschedulerVersion deschedulerversion.Info
serverVersion string
expectError bool
}
testCases := []testCase{
{ {
name: "no error when descheduler minor equals to server minor", name: "no error when descheduler minor equals to server minor",
deschedulerVersion: "v0.26", deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "26"},
serverVersion: "v1.26.1", serverVersion: "v1.26.1",
expectError: false, expectError: false,
}, },
{ {
name: "no error when descheduler minor is 3 behind server minor", name: "no error when descheduler minor is 3 behind server minor",
deschedulerVersion: "0.23", deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "23"},
serverVersion: "v1.26.1", serverVersion: "v1.26.1",
expectError: false, expectError: false,
}, },
{ {
name: "no error when descheduler minor is 3 ahead of server minor", name: "no error when descheduler minor is 3 ahead of server minor",
deschedulerVersion: "v0.26", deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "26"},
serverVersion: "v1.26.1", serverVersion: "v1.26.1",
expectError: false, expectError: false,
}, },
{ {
name: "error when descheduler minor is 4 behind server minor", name: "error when descheduler minor is 4 behind server minor",
deschedulerVersion: "v0.22", deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "22"},
serverVersion: "v1.26.1", serverVersion: "v1.26.1",
expectError: true, expectError: true,
}, },
{ {
name: "error when descheduler minor is 4 ahead of server minor", name: "error when descheduler minor is 4 ahead of server minor",
deschedulerVersion: "v0.27", deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "27"},
serverVersion: "v1.23.1", serverVersion: "v1.23.1",
expectError: true, expectError: true,
}, },
{ {
name: "no error when using managed provider version", name: "no error when using managed provider version",
deschedulerVersion: "v0.25", deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "25"},
serverVersion: "v1.25.12-eks-2d98532", serverVersion: "v1.25.12-eks-2d98532",
expectError: false, expectError: false,
}, },
@@ -298,8 +428,7 @@ func TestValidateVersionCompatibility(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fakeDiscovery.FakedServerVersion = &apiversion.Info{GitVersion: tc.serverVersion}
-deschedulerVersion := deschedulerversion.Info{GitVersion: tc.deschedulerVersion}
-err := validateVersionCompatibility(fakeDiscovery, deschedulerVersion)
+err := validateVersionCompatibility(fakeDiscovery, tc.deschedulerVersion)
hasError := err != nil
if tc.expectError != hasError {
@@ -309,7 +438,7 @@ func TestValidateVersionCompatibility(t *testing.T) {
}
}
-func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
+func podEvictionReactionTestingFnc(evictedPods *[]string, isEvictionsInBackground func(podName string) bool, evictionErr error) func(action core.Action) (bool, runtime.Object, error) {
return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
@@ -317,9 +446,420 @@ func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (boo
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
if eviction, matched := createAct.Object.(*policy.Eviction); matched { if eviction, matched := createAct.Object.(*policy.Eviction); matched {
if isEvictionsInBackground != nil && isEvictionsInBackground(eviction.GetName()) {
return true, nil, tooManyRequestsError
}
if evictionErr != nil {
return true, nil, evictionErr
}
*evictedPods = append(*evictedPods, eviction.GetName()) *evictedPods = append(*evictedPods, eviction.GetName())
return true, nil, nil
} }
} }
return false, nil, nil // fallback to the default reactor return false, nil, nil // fallback to the default reactor
} }
} }
func taintNodeNoSchedule(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "key",
Value: "value",
Effect: v1.TaintEffectNoSchedule,
},
}
}
func TestPodEvictorReset(t *testing.T) {
initPluginRegistry()
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2}
ownerRef1 := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.Namespace = "dev"
pod.ObjectMeta.OwnerReferences = ownerRef1
}
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePod)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePod)
internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
ctxCancel, cancel := context.WithCancel(ctx)
rs, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, node1, node2, p1, p2)
defer cancel()
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
var fakeEvictedPods []string
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
}
// two pod evictions expected per descheduling cycle
klog.Infof("2 pod evictions expected per descheduling cycle, 2 real evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 || len(fakeEvictedPods) != 0 {
t.Fatalf("Expected (2,2,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}
// two pod evictions expected per descheduling cycle
klog.Infof("2 pod evictions expected per descheduling cycle, 4 real evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 || len(fakeEvictedPods) != 0 {
t.Fatalf("Expected (2,4,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}
// check the fake client syncing and the right pods evicted
klog.Infof("Enabling the dry run mode")
rs.DryRun = true
evictedPods = []string{}
klog.Infof("2 pod eviction expected per a descheduling cycle, 2 fake evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 2 {
t.Fatalf("Expected (2,0,2) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}
klog.Infof("2 pod eviction expected per a descheduling cycle, 4 fake evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 4 {
t.Fatalf("Expected (2,0,4) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}
}
func checkTotals(t *testing.T, ctx context.Context, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
if total := descheduler.podEvictor.TotalEvictionRequests(); total != totalEvictionRequests {
t.Fatalf("Expected %v total eviction requests, got %v instead", totalEvictionRequests, total)
}
if total := descheduler.podEvictor.TotalEvicted(); total != totalEvicted {
t.Fatalf("Expected %v total evictions, got %v instead", totalEvicted, total)
}
t.Logf("Total evictions: %v, total eviction requests: %v, total evictions and eviction requests: %v", totalEvicted, totalEvictionRequests, totalEvicted+totalEvictionRequests)
}
func runDeschedulingCycleAndCheckTotals(t *testing.T, ctx context.Context, nodes []*v1.Node, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
err := descheduler.runDeschedulerLoop(ctx, nodes)
if err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
checkTotals(t, ctx, descheduler, totalEvictionRequests, totalEvicted)
}
func TestEvictionRequestsCache(t *testing.T) {
initPluginRegistry()
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2}
ownerRef1 := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.Namespace = "dev"
pod.ObjectMeta.OwnerReferences = ownerRef1
pod.Status.Phase = v1.PodRunning
}
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
updatePod(pod)
pod.Annotations = map[string]string{
evictions.EvictionRequestAnnotationKey: "",
}
}
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, updatePod)
internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
ctxCancel, cancel := context.WithCancel(ctx)
featureGates := featuregate.NewFeatureGate()
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
})
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, node1, node2, p1, p2, p3, p4)
defer cancel()
var fakeEvictedPods []string
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))
klog.Infof("2 evictions in background expected, 2 normal evictions")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
// On purpose, no evicted pod is actually deleted so the test can run the descheduling cycle repeatedly
// without recreating the pods.
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
klog.Infof("Scenario: Eviction in background got initiated")
p2.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
t.Fatalf("unable to update a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
klog.Infof("Scenario: Another eviction in background got initiated")
p1.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
if _, err := client.CoreV1().Pods(p1.Namespace).Update(context.TODO(), p1, metav1.UpdateOptions{}); err != nil {
t.Fatalf("unable to update a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
klog.Infof("Scenario: Eviction in background completed")
if err := client.CoreV1().Pods(p1.Namespace).Delete(context.TODO(), p1.Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("unable to delete a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Check the number of evictions in background decreased")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 2)
klog.Infof("Scenario: A new pod without eviction in background added")
if _, err := client.CoreV1().Pods(p5.Namespace).Create(context.TODO(), p5, metav1.CreateOptions{}); err != nil {
t.Fatalf("unable to create a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Check the number of evictions increased after running a descheduling cycle")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)
klog.Infof("Scenario: Eviction in background canceled => eviction in progress annotation removed")
delete(p2.Annotations, evictions.EvictionInProgressAnnotationKey)
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
t.Fatalf("unable to update a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Check the number of evictions in background decreased")
checkTotals(t, ctx, descheduler, 0, 3)
klog.Infof("Scenario: Re-run the descheduling cycle to re-request eviction in background")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)
klog.Infof("Scenario: Eviction in background completed with a pod in completed state")
p2.Status.Phase = v1.PodSucceeded
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
t.Fatalf("unable to delete a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Check the number of evictions in background decreased")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 0, 3)
}
func TestDeschedulingLimits(t *testing.T) {
initPluginRegistry()
tests := []struct {
description string
policy *api.DeschedulerPolicy
limit uint
}{
{
description: "limits per node",
policy: func() *api.DeschedulerPolicy {
policy := removePodsViolatingNodeTaintsPolicy()
policy.MaxNoOfPodsToEvictPerNode = utilptr.To[uint](4)
return policy
}(),
limit: uint(4),
},
{
description: "limits per namespace",
policy: func() *api.DeschedulerPolicy {
policy := removePodsViolatingNodeTaintsPolicy()
policy.MaxNoOfPodsToEvictPerNamespace = utilptr.To[uint](4)
return policy
}(),
limit: uint(4),
},
{
description: "limits per cycle",
policy: func() *api.DeschedulerPolicy {
policy := removePodsViolatingNodeTaintsPolicy()
policy.MaxNoOfPodsToEvictTotal = utilptr.To[uint](4)
return policy
}(),
limit: uint(4),
},
}
ownerRef1 := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.Namespace = "dev"
pod.ObjectMeta.OwnerReferences = ownerRef1
}
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
updatePod(pod)
pod.Annotations = map[string]string{
evictions.EvictionRequestAnnotationKey: "",
}
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2}
ctxCancel, cancel := context.WithCancel(ctx)
featureGates := featuregate.NewFeatureGate()
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
})
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, node1, node2)
defer cancel()
var fakeEvictedPods []string
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))
rand.Seed(time.Now().UnixNano())
pods := []*v1.Pod{
test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground),
test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground),
test.BuildTestPod("p3", 100, 0, node1.Name, updatePod),
test.BuildTestPod("p4", 100, 0, node1.Name, updatePod),
test.BuildTestPod("p5", 100, 0, node1.Name, updatePod),
}
for i := 0; i < 10; i++ {
rand.Shuffle(len(pods), func(i, j int) { pods[i], pods[j] = pods[j], pods[i] })
func() {
for j := 0; j < 5; j++ {
idx := j
if _, err := client.CoreV1().Pods(pods[idx].Namespace).Create(context.TODO(), pods[idx], metav1.CreateOptions{}); err != nil {
t.Fatalf("unable to create a pod: %v", err)
}
defer func() {
if err := client.CoreV1().Pods(pods[idx].Namespace).Delete(context.TODO(), pods[idx].Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("unable to delete a pod: %v", err)
}
}()
}
time.Sleep(100 * time.Millisecond)
klog.Infof("2 evictions in background expected, 2 normal evictions")
err := descheduler.runDeschedulerLoop(ctx, nodes)
if err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
totalERs := descheduler.podEvictor.TotalEvictionRequests()
totalEs := descheduler.podEvictor.TotalEvicted()
if totalERs+totalEs > tc.limit {
t.Fatalf("Expected %v evictions and eviction requests in total, got %v instead", tc.limit, totalERs+totalEs)
}
t.Logf("Total evictions and eviction requests: %v (er=%v, e=%v)", totalERs+totalEs, totalERs, totalEs)
}()
}
})
}
}
func TestLoadAwareDescheduling(t *testing.T) {
initPluginRegistry()
ownerRef1 := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = ownerRef1
}
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2}
p1 := test.BuildTestPod("p1", 300, 0, node1.Name, updatePod)
p2 := test.BuildTestPod("p2", 300, 0, node1.Name, updatePod)
p3 := test.BuildTestPod("p3", 300, 0, node1.Name, updatePod)
p4 := test.BuildTestPod("p4", 300, 0, node1.Name, updatePod)
p5 := test.BuildTestPod("p5", 300, 0, node1.Name, updatePod)
nodemetricses := []*v1beta1.NodeMetrics{
test.BuildNodeMetrics("n1", 2400, 3000),
test.BuildNodeMetrics("n2", 400, 0),
}
podmetricses := []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 400, 0),
test.BuildPodMetrics("p2", 400, 0),
test.BuildPodMetrics("p3", 400, 0),
test.BuildPodMetrics("p4", 400, 0),
test.BuildPodMetrics("p5", 400, 0),
}
metricsClientset := fakemetricsclient.NewSimpleClientset()
for _, nodemetrics := range nodemetricses {
metricsClientset.Tracker().Create(nodesgvr, nodemetrics, "")
}
for _, podmetrics := range podmetricses {
metricsClientset.Tracker().Create(podsgvr, podmetrics, podmetrics.Namespace)
}
policy := lowNodeUtilizationPolicy(
api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
true, // enabled metrics utilization
)
policy.MetricsCollector.Enabled = true
ctxCancel, cancel := context.WithCancel(ctx)
_, descheduler, _ := initDescheduler(
t,
ctxCancel,
initFeatureGates(),
policy,
metricsClientset,
node1, node2, p1, p2, p3, p4, p5)
defer cancel()
// This needs to be run since the metrics collector is started
// after newDescheduler in RunDeschedulerStrategies.
descheduler.metricsCollector.Collect(ctx)
err := descheduler.runDeschedulerLoop(ctx, nodes)
if err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
totalEs := descheduler.podEvictor.TotalEvicted()
if totalEs != 2 {
t.Fatalf("Expected %v evictions in total, got %v instead", 2, totalEs)
}
t.Logf("Total evictions: %v", totalEs)
}

View File

@@ -0,0 +1,45 @@
package evictions
type EvictionNodeLimitError struct {
node string
}
func (e EvictionNodeLimitError) Error() string {
return "maximum number of evicted pods per node reached"
}
func NewEvictionNodeLimitError(node string) *EvictionNodeLimitError {
return &EvictionNodeLimitError{
node: node,
}
}
var _ error = &EvictionNodeLimitError{}
type EvictionNamespaceLimitError struct {
namespace string
}
func (e EvictionNamespaceLimitError) Error() string {
return "maximum number of evicted pods per namespace reached"
}
func NewEvictionNamespaceLimitError(namespace string) *EvictionNamespaceLimitError {
return &EvictionNamespaceLimitError{
namespace: namespace,
}
}
var _ error = &EvictionNamespaceLimitError{}
type EvictionTotalLimitError struct{}
func (e EvictionTotalLimitError) Error() string {
return "maximum number of evicted pods per a descheduling cycle reached"
}
func NewEvictionTotalLimitError() *EvictionTotalLimitError {
return &EvictionTotalLimitError{}
}
var _ error = &EvictionTotalLimitError{}

View File

@@ -19,6 +19,9 @@ package evictions
import ( import (
"context" "context"
"fmt" "fmt"
"strings"
"sync"
"time"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
@@ -26,15 +29,176 @@ import (
policy "k8s.io/api/policy/v1" policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/events" "k8s.io/client-go/tools/events"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"sigs.k8s.io/descheduler/metrics"
"sigs.k8s.io/descheduler/metrics"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils" eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/features"
"sigs.k8s.io/descheduler/pkg/tracing" "sigs.k8s.io/descheduler/pkg/tracing"
) )
var (
assumedEvictionRequestTimeoutSeconds uint = 10 * 60 // 10 minutes
evictionRequestsCacheResyncPeriod time.Duration = 10 * time.Minute
// syncedPollPeriod controls how often you look at the status of your sync funcs
syncedPollPeriod = 100 * time.Millisecond
)
type evictionRequestItem struct {
podName, podNamespace, podNodeName string
evictionAssumed bool
assumedTimestamp metav1.Time
}
type evictionRequestsCache struct {
mu sync.RWMutex
requests map[string]evictionRequestItem
requestsPerNode map[string]uint
requestsPerNamespace map[string]uint
requestsTotal uint
assumedRequestTimeoutSeconds uint
}
func newEvictionRequestsCache(assumedRequestTimeoutSeconds uint) *evictionRequestsCache {
return &evictionRequestsCache{
requests: make(map[string]evictionRequestItem),
requestsPerNode: make(map[string]uint),
requestsPerNamespace: make(map[string]uint),
assumedRequestTimeoutSeconds: assumedRequestTimeoutSeconds,
}
}
func (erc *evictionRequestsCache) run(ctx context.Context) {
wait.UntilWithContext(ctx, erc.cleanCache, evictionRequestsCacheResyncPeriod)
}
// cleanCache removes all assumed entries that have not been confirmed
// for longer than the configured timeout
func (erc *evictionRequestsCache) cleanCache(ctx context.Context) {
erc.mu.Lock()
defer erc.mu.Unlock()
klog.V(4).Infof("Cleaning cache of assumed eviction requests in background")
for uid, item := range erc.requests {
if item.evictionAssumed {
requestAgeSeconds := uint(metav1.Now().Sub(item.assumedTimestamp.Local()).Seconds())
if requestAgeSeconds > erc.assumedRequestTimeoutSeconds {
klog.V(4).InfoS("Assumed eviction request in background timed out, deleting", "timeout", erc.assumedRequestTimeoutSeconds, "podNamespace", item.podNamespace, "podName", item.podName)
erc.deleteItem(uid)
}
}
}
}
func (erc *evictionRequestsCache) evictionRequestsPerNode(nodeName string) uint {
erc.mu.RLock()
defer erc.mu.RUnlock()
return erc.requestsPerNode[nodeName]
}
func (erc *evictionRequestsCache) evictionRequestsPerNamespace(ns string) uint {
erc.mu.RLock()
defer erc.mu.RUnlock()
return erc.requestsPerNamespace[ns]
}
func (erc *evictionRequestsCache) evictionRequestsTotal() uint {
erc.mu.RLock()
defer erc.mu.RUnlock()
return erc.requestsTotal
}
func (erc *evictionRequestsCache) TotalEvictionRequests() uint {
erc.mu.RLock()
defer erc.mu.RUnlock()
return uint(len(erc.requests))
}
// getPodKey returns the string key of a pod.
func getPodKey(pod *v1.Pod) string {
uid := string(pod.UID)
// Every pod is expected to have the UID set.
// When the descheduling framework is used for simulation,
// user-created workloads may be missing the UID.
if len(uid) == 0 {
panic(fmt.Errorf("cannot get cache key for %v/%v pod with empty UID", pod.Namespace, pod.Name))
}
return uid
}
func (erc *evictionRequestsCache) addPod(pod *v1.Pod) {
erc.mu.Lock()
defer erc.mu.Unlock()
uid := getPodKey(pod)
if _, exists := erc.requests[uid]; exists {
return
}
erc.requests[uid] = evictionRequestItem{podNamespace: pod.Namespace, podName: pod.Name, podNodeName: pod.Spec.NodeName}
erc.requestsPerNode[pod.Spec.NodeName]++
erc.requestsPerNamespace[pod.Namespace]++
erc.requestsTotal++
}
func (erc *evictionRequestsCache) assumePod(pod *v1.Pod) {
erc.mu.Lock()
defer erc.mu.Unlock()
uid := getPodKey(pod)
if _, exists := erc.requests[uid]; exists {
return
}
erc.requests[uid] = evictionRequestItem{
podNamespace: pod.Namespace,
podName: pod.Name,
podNodeName: pod.Spec.NodeName,
evictionAssumed: true,
assumedTimestamp: metav1.NewTime(time.Now()),
}
erc.requestsPerNode[pod.Spec.NodeName]++
erc.requestsPerNamespace[pod.Namespace]++
erc.requestsTotal++
}
// no locking, expected to be invoked from protected methods only
func (erc *evictionRequestsCache) deleteItem(uid string) {
erc.requestsPerNode[erc.requests[uid].podNodeName]--
if erc.requestsPerNode[erc.requests[uid].podNodeName] == 0 {
delete(erc.requestsPerNode, erc.requests[uid].podNodeName)
}
erc.requestsPerNamespace[erc.requests[uid].podNamespace]--
if erc.requestsPerNamespace[erc.requests[uid].podNamespace] == 0 {
delete(erc.requestsPerNamespace, erc.requests[uid].podNamespace)
}
erc.requestsTotal--
delete(erc.requests, uid)
}
func (erc *evictionRequestsCache) deletePod(pod *v1.Pod) {
erc.mu.Lock()
defer erc.mu.Unlock()
uid := getPodKey(pod)
if _, exists := erc.requests[uid]; exists {
erc.deleteItem(uid)
}
}
func (erc *evictionRequestsCache) hasPod(pod *v1.Pod) bool {
erc.mu.RLock()
defer erc.mu.RUnlock()
uid := getPodKey(pod)
_, exists := erc.requests[uid]
return exists
}
var (
EvictionRequestAnnotationKey = "descheduler.alpha.kubernetes.io/request-evict-only"
EvictionInProgressAnnotationKey = "descheduler.alpha.kubernetes.io/eviction-in-progress"
EvictionInBackgroundErrorText = "Eviction triggered evacuation"
)
// nodePodEvictedCount keeps count of pods evicted on node // nodePodEvictedCount keeps count of pods evicted on node
type ( type (
nodePodEvictedCount map[string]uint nodePodEvictedCount map[string]uint
@@ -42,69 +206,239 @@ type (
) )
type PodEvictor struct { type PodEvictor struct {
client clientset.Interface mu sync.RWMutex
nodes []*v1.Node client clientset.Interface
policyGroupVersion string policyGroupVersion string
dryRun bool dryRun bool
maxPodsToEvictPerNode *uint evictionFailureEventNotification bool
maxPodsToEvictPerNamespace *uint maxPodsToEvictPerNode *uint
nodepodCount nodePodEvictedCount maxPodsToEvictPerNamespace *uint
namespacePodCount namespacePodEvictCount maxPodsToEvictTotal *uint
metricsEnabled bool nodePodCount nodePodEvictedCount
eventRecorder events.EventRecorder namespacePodCount namespacePodEvictCount
totalPodCount uint
metricsEnabled bool
eventRecorder events.EventRecorder
erCache *evictionRequestsCache
featureGates featuregate.FeatureGate
// registeredHandlers contains the registrations of all handlers. It's used to check if all handlers have finished syncing before the scheduling cycles start.
registeredHandlers []cache.ResourceEventHandlerRegistration
} }
func NewPodEvictor( func NewPodEvictor(
ctx context.Context,
client clientset.Interface, client clientset.Interface,
policyGroupVersion string,
dryRun bool,
maxPodsToEvictPerNode *uint,
maxPodsToEvictPerNamespace *uint,
nodes []*v1.Node,
metricsEnabled bool,
eventRecorder events.EventRecorder, eventRecorder events.EventRecorder,
) *PodEvictor { podInformer cache.SharedIndexInformer,
nodePodCount := make(nodePodEvictedCount) featureGates featuregate.FeatureGate,
namespacePodCount := make(namespacePodEvictCount) options *Options,
for _, node := range nodes { ) (*PodEvictor, error) {
// Initialize podsEvicted till now with 0. if options == nil {
nodePodCount[node.Name] = 0 options = NewOptions()
} }
return &PodEvictor{ podEvictor := &PodEvictor{
client: client, client: client,
nodes: nodes, eventRecorder: eventRecorder,
policyGroupVersion: policyGroupVersion, policyGroupVersion: options.policyGroupVersion,
dryRun: dryRun, dryRun: options.dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode, evictionFailureEventNotification: options.evictionFailureEventNotification,
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace, maxPodsToEvictPerNode: options.maxPodsToEvictPerNode,
nodepodCount: nodePodCount, maxPodsToEvictPerNamespace: options.maxPodsToEvictPerNamespace,
namespacePodCount: namespacePodCount, maxPodsToEvictTotal: options.maxPodsToEvictTotal,
metricsEnabled: metricsEnabled, metricsEnabled: options.metricsEnabled,
eventRecorder: eventRecorder, nodePodCount: make(nodePodEvictedCount),
namespacePodCount: make(namespacePodEvictCount),
featureGates: featureGates,
} }
if featureGates.Enabled(features.EvictionsInBackground) {
erCache := newEvictionRequestsCache(assumedEvictionRequestTimeoutSeconds)
handlerRegistration, err := podInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
pod, ok := obj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", obj)
return
}
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
if _, exists := pod.Annotations[EvictionInProgressAnnotationKey]; exists {
// Ignore completed/succeeded or failed pods
if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
klog.V(3).InfoS("Eviction in background detected. Adding pod to the cache.", "pod", klog.KObj(pod))
erCache.addPod(pod)
}
}
}
},
UpdateFunc: func(oldObj, newObj interface{}) {
oldPod, ok := oldObj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj)
return
}
newPod, ok := newObj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj)
return
}
// Ignore pods that are not subject to an eviction in background
if _, exists := newPod.Annotations[EvictionRequestAnnotationKey]; !exists {
if erCache.hasPod(newPod) {
klog.V(3).InfoS("Pod with eviction in background lost annotation. Removing pod from the cache.", "pod", klog.KObj(newPod))
}
erCache.deletePod(newPod)
return
}
// Remove completed/succeeded or failed pods from the cache
if newPod.Status.Phase == v1.PodSucceeded || newPod.Status.Phase == v1.PodFailed {
klog.V(3).InfoS("Pod with eviction in background completed. Removing pod from the cache.", "pod", klog.KObj(newPod))
erCache.deletePod(newPod)
return
}
// Ignore any pod that does not have eviction in progress
if _, exists := newPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
// In case the EvictionInProgressAnnotationKey annotation is not present or was removed,
// it's unclear whether the eviction was restarted or terminated.
// If the eviction gets restarted, the pod needs to be removed from the cache
// to allow re-triggering the eviction.
if _, exists := oldPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
return
}
// The annotation was removed -> remove the pod from the cache so the
// eviction can be requested again. In case the eviction got restarted, requesting
// the eviction again is expected to be a no-op. In case the eviction
// got terminated with no retry, requesting a new eviction is a normal
// operation.
klog.V(3).InfoS("Eviction in background canceled (annotation removed). Removing pod from the cache.", "annotation", EvictionInProgressAnnotationKey, "pod", klog.KObj(newPod))
erCache.deletePod(newPod)
return
}
// Pick up the eviction in progress
if !erCache.hasPod(newPod) {
klog.V(3).InfoS("Eviction in background detected. Updating the cache.", "pod", klog.KObj(newPod))
}
erCache.addPod(newPod)
},
DeleteFunc: func(obj interface{}) {
var pod *v1.Pod
switch t := obj.(type) {
case *v1.Pod:
pod = t
case cache.DeletedFinalStateUnknown:
var ok bool
pod, ok = t.Obj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
return
}
default:
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t)
return
}
if erCache.hasPod(pod) {
klog.V(3).InfoS("Pod with eviction in background deleted/evicted. Removing pod from the cache.", "pod", klog.KObj(pod))
}
erCache.deletePod(pod)
},
},
)
if err != nil {
return nil, fmt.Errorf("unable to register event handler for pod evictor: %v", err)
}
podEvictor.registeredHandlers = append(podEvictor.registeredHandlers, handlerRegistration)
go erCache.run(ctx)
podEvictor.erCache = erCache
}
return podEvictor, nil
}
// WaitForEventHandlersSync waits for EventHandlers to sync.
// It returns true if it was successful, false if the controller should shut down
func (pe *PodEvictor) WaitForEventHandlersSync(ctx context.Context) error {
return wait.PollUntilContextCancel(ctx, syncedPollPeriod, true, func(ctx context.Context) (done bool, err error) {
for _, handler := range pe.registeredHandlers {
if !handler.HasSynced() {
return false, nil
}
}
return true, nil
})
} }
// NodeEvicted gives a number of pods evicted for node // NodeEvicted gives a number of pods evicted for node
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint { func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
return pe.nodepodCount[node.Name] pe.mu.RLock()
defer pe.mu.RUnlock()
return pe.nodePodCount[node.Name]
} }
// TotalEvicted gives a number of pods evicted through all nodes // TotalEvicted gives a number of pods evicted through all nodes
func (pe *PodEvictor) TotalEvicted() uint { func (pe *PodEvictor) TotalEvicted() uint {
var total uint pe.mu.RLock()
for _, count := range pe.nodepodCount { defer pe.mu.RUnlock()
total += count return pe.totalPodCount
}
return total
} }
// NodeLimitExceeded checks if the number of evictions for a node was exceeded func (pe *PodEvictor) ResetCounters() {
func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool { pe.mu.Lock()
if pe.maxPodsToEvictPerNode != nil { defer pe.mu.Unlock()
return pe.nodepodCount[node.Name] == *pe.maxPodsToEvictPerNode pe.nodePodCount = make(nodePodEvictedCount)
pe.namespacePodCount = make(namespacePodEvictCount)
pe.totalPodCount = 0
}
func (pe *PodEvictor) SetClient(client clientset.Interface) {
pe.mu.Lock()
defer pe.mu.Unlock()
pe.client = client
}
func (pe *PodEvictor) evictionRequestsTotal() uint {
if pe.featureGates.Enabled(features.EvictionsInBackground) {
return pe.erCache.evictionRequestsTotal()
} else {
return 0
}
}
func (pe *PodEvictor) evictionRequestsPerNode(node string) uint {
if pe.featureGates.Enabled(features.EvictionsInBackground) {
return pe.erCache.evictionRequestsPerNode(node)
} else {
return 0
}
}
func (pe *PodEvictor) evictionRequestsPerNamespace(ns string) uint {
if pe.featureGates.Enabled(features.EvictionsInBackground) {
return pe.erCache.evictionRequestsPerNamespace(ns)
} else {
return 0
}
}
func (pe *PodEvictor) EvictionRequests(node *v1.Node) uint {
pe.mu.RLock()
defer pe.mu.RUnlock()
return pe.evictionRequestsTotal()
}
func (pe *PodEvictor) TotalEvictionRequests() uint {
pe.mu.RLock()
defer pe.mu.RUnlock()
if pe.featureGates.Enabled(features.EvictionsInBackground) {
return pe.erCache.TotalEvictionRequests()
} else {
return 0
} }
return false
} }
// EvictOptions provides a handle for passing additional info to EvictPod // EvictOptions provides a handle for passing additional info to EvictPod
@@ -119,32 +453,71 @@ type EvictOptions struct {
// EvictPod evicts a pod while exercising eviction limits. // EvictPod evicts a pod while exercising eviction limits.
// Returns true when the pod is evicted on the server side. // Returns true when the pod is evicted on the server side.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool { func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) error {
if len(pod.UID) == 0 {
klog.InfoS("Ignoring pod eviction due to missing UID", "pod", pod)
return fmt.Errorf("Pod %v is missing UID", klog.KObj(pod))
}
if pe.featureGates.Enabled(features.EvictionsInBackground) {
// eviction in background requested
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
if pe.erCache.hasPod(pod) {
klog.V(3).InfoS("Eviction in background already requested (ignoring)", "pod", klog.KObj(pod))
return nil
}
}
}
pe.mu.Lock()
defer pe.mu.Unlock()
var span trace.Span var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation))) ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
defer span.End() defer span.End()
if pod.Spec.NodeName != "" { if pe.maxPodsToEvictTotal != nil && pe.totalPodCount+pe.evictionRequestsTotal()+1 > *pe.maxPodsToEvictTotal {
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode { err := NewEvictionTotalLimitError()
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per node reached")))
klog.ErrorS(fmt.Errorf("maximum number of evicted pods per node reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
return false
}
}
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
if pe.metricsEnabled { if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc() metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
} }
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per namespace reached"))) span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(fmt.Errorf("maximum number of evicted pods per namespace reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace) klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal)
return false if pe.evictionFailureEventNotification {
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictTotal)
}
return err
} }
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion) if pod.Spec.NodeName != "" {
if pe.maxPodsToEvictPerNode != nil && pe.nodePodCount[pod.Spec.NodeName]+pe.evictionRequestsPerNode(pod.Spec.NodeName)+1 > *pe.maxPodsToEvictPerNode {
err := NewEvictionNodeLimitError(pod.Spec.NodeName)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
if pe.evictionFailureEventNotification {
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNode)
}
return err
}
}
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+pe.evictionRequestsPerNamespace(pod.Namespace)+1 > *pe.maxPodsToEvictPerNamespace {
err := NewEvictionNamespaceLimitError(pod.Namespace)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace, "pod", klog.KObj(pod))
if pe.evictionFailureEventNotification {
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNamespace)
}
return err
}
ignore, err := pe.evictPod(ctx, pod)
if err != nil { if err != nil {
// err is used only for logging purposes // err is used only for logging purposes
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error()))) span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
@@ -152,13 +525,21 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
if pe.metricsEnabled { if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc() metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
} }
return false if pe.evictionFailureEventNotification {
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: %v", pod.Spec.NodeName, err.Error())
}
return err
}
if ignore {
return nil
} }
if pod.Spec.NodeName != "" { if pod.Spec.NodeName != "" {
pe.nodepodCount[pod.Spec.NodeName]++ pe.nodePodCount[pod.Spec.NodeName]++
} }
pe.namespacePodCount[pod.Namespace]++ pe.namespacePodCount[pod.Namespace]++
pe.totalPodCount++
if pe.metricsEnabled { if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc() metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
@@ -175,17 +556,18 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
reason = "NotSet" reason = "NotSet"
} }
} }
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName) pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
} }
return true return nil
} }
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error { // return (ignore, err)
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
deleteOptions := &metav1.DeleteOptions{} deleteOptions := &metav1.DeleteOptions{}
// GracePeriodSeconds ? // GracePeriodSeconds ?
eviction := &policy.Eviction{ eviction := &policy.Eviction{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
APIVersion: policyGroupVersion, APIVersion: pe.policyGroupVersion,
Kind: eutils.EvictionKind, Kind: eutils.EvictionKind,
}, },
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -194,13 +576,36 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
}, },
DeleteOptions: deleteOptions, DeleteOptions: deleteOptions,
} }
err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction) err := pe.client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
if err == nil {
return false, nil
}
if pe.featureGates.Enabled(features.EvictionsInBackground) {
// eviction in background requested
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
// Simulating https://github.com/kubevirt/kubevirt/pull/11532/files#diff-059cc1fc09e8b469143348cc3aa80b40de987670e008fa18a6fe010061f973c9R77
if apierrors.IsTooManyRequests(err) && strings.Contains(err.Error(), EvictionInBackgroundErrorText) {
// Ignore eviction of any pod that has failed or completed.
// It can happen that an eviction in background ends up with the pod stuck in the completed state.
// Normally, any requested eviction is expected to end with the pod's deletion.
// However, some custom eviction policies may leave completed pods around,
// which would cause all the completed pods to still be considered unfinished evictions in background.
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
klog.V(3).InfoS("Ignoring eviction of a completed/failed pod", "pod", klog.KObj(pod))
return true, nil
}
klog.V(3).InfoS("Eviction in background assumed", "pod", klog.KObj(pod))
pe.erCache.assumePod(pod)
return true, nil
}
}
}
if apierrors.IsTooManyRequests(err) { if apierrors.IsTooManyRequests(err) {
return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err) return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
} }
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err) return false, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
} }
return err return false, err
} }

View File

@@ -18,54 +18,107 @@ package evictions
import ( import (
"context" "context"
"fmt"
"reflect"
"testing" "testing"
"time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/events"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
utilptr "k8s.io/utils/ptr"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/features"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
const (
notFoundText = "pod not found when evicting \"%s\": pods \"%s\" not found"
tooManyRequests = "error when evicting pod (ignoring) \"%s\": Too many requests: too many requests"
)
func initFeatureGates() featuregate.FeatureGate {
featureGates := featuregate.NewFeatureGate()
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
})
return featureGates
}
func TestEvictPod(t *testing.T) { func TestEvictPod(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil) node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil) pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
tests := []struct { tests := []struct {
description string description string
node *v1.Node node *v1.Node
pod *v1.Pod evictedPod *v1.Pod
pods []v1.Pod pods []runtime.Object
want error wantErr error
}{ }{
{ {
description: "test pod eviction - pod present", description: "test pod eviction - pod present",
node: node1, node: node1,
pod: pod1, evictedPod: pod1,
pods: []v1.Pod{*pod1}, pods: []runtime.Object{pod1},
want: nil,
}, },
{ {
description: "test pod eviction - pod absent", description: "test pod eviction - pod absent (not found error)",
node: node1, node: node1,
pod: pod1, evictedPod: pod1,
pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1", nil), *test.BuildTestPod("p3", 450, 0, "node1", nil)}, pods: []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
want: nil, wantErr: fmt.Errorf(notFoundText, pod1.Name, pod1.Name),
},
{
description: "test pod eviction - pod absent (too many requests error)",
node: node1,
evictedPod: pod1,
pods: []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
wantErr: fmt.Errorf(tooManyRequests, pod1.Name),
}, },
} }
for _, test := range tests { for _, test := range tests {
fakeClient := &fake.Clientset{} t.Run(test.description, func(t *testing.T) {
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { ctx := context.Background()
return true, &v1.PodList{Items: test.pods}, nil fakeClient := fake.NewClientset(test.pods...)
fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, test.wantErr
})
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
eventRecorder := &events.FakeRecorder{}
podEvictor, err := NewPodEvictor(
ctx,
fakeClient,
eventRecorder,
sharedInformerFactory.Core().V1().Pods().Informer(),
initFeatureGates(),
NewOptions(),
)
if err != nil {
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
}
_, got := podEvictor.evictPod(ctx, test.evictedPod)
if got != test.wantErr {
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.evictedPod.Name, test.wantErr, got)
}
}) })
got := evictPod(ctx, fakeClient, test.pod, "v1")
if got != test.want {
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
}
} }
} }
@@ -113,3 +166,319 @@ func TestPodTypes(t *testing.T) {
t.Errorf("Expected p1 to be a normal pod.") t.Errorf("Expected p1 to be a normal pod.")
} }
} }
func TestNewPodEvictor(t *testing.T) {
ctx := context.Background()
pod1 := test.BuildTestPod("pod", 400, 0, "node", nil)
type podEvictorTest struct {
description string
pod *v1.Pod
dryRun bool
evictionFailureEventNotification *bool
maxPodsToEvictTotal *uint
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
expectedNodeEvictions uint
expectedTotalEvictions uint
expectedError error
// events is a slice of strings representing expected events.
// Each string in the slice should follow the format: "EventType Reason Message",
// e.g. "Warning Failed processing failed".
events []string
}
tests := []podEvictorTest{
{
description: "one eviction expected with eviction failure event notification",
pod: pod1,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 1,
expectedTotalEvictions: 1,
expectedError: nil,
events: []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
},
{
description: "eviction limit exceeded on total with eviction failure event notification",
pod: pod1,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](0),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionTotalLimitError(),
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (0)"},
},
{
description: "eviction limit exceeded on node with eviction failure event notification",
pod: pod1,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](0),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionNodeLimitError("node"),
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (0)"},
},
{
description: "eviction limit exceeded on node with eviction failure event notification",
pod: pod1,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](0),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionNamespaceLimitError("default"),
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (0)"},
},
{
description: "eviction error with eviction failure event notification",
pod: pod1,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: fmt.Errorf("eviction error"),
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: eviction error"},
},
{
description: "eviction with dryRun with eviction failure event notification",
pod: pod1,
dryRun: true,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 1,
expectedTotalEvictions: 1,
expectedError: nil,
},
{
description: "one eviction expected without eviction failure event notification",
pod: pod1,
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 1,
expectedTotalEvictions: 1,
expectedError: nil,
events: []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
},
{
description: "eviction limit exceeded on total without eviction failure event notification",
pod: pod1,
maxPodsToEvictTotal: utilptr.To[uint](0),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionTotalLimitError(),
},
{
description: "eviction limit exceeded on node without eviction failure event notification",
pod: pod1,
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](0),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionNodeLimitError("node"),
},
{
description: "eviction limit exceeded on node without eviction failure event notification",
pod: pod1,
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](0),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionNamespaceLimitError("default"),
},
{
description: "eviction error without eviction failure event notification",
pod: pod1,
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: fmt.Errorf("eviction error"),
},
{
description: "eviction without dryRun with eviction failure event notification",
pod: pod1,
dryRun: true,
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 1,
expectedTotalEvictions: 1,
expectedError: nil,
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
fakeClient := fake.NewSimpleClientset(pod1)
fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, test.expectedError
})
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
eventRecorder := events.NewFakeRecorder(100)
podEvictor, err := NewPodEvictor(
ctx,
fakeClient,
eventRecorder,
sharedInformerFactory.Core().V1().Pods().Informer(),
initFeatureGates(),
NewOptions().
WithDryRun(test.dryRun).
WithMaxPodsToEvictTotal(test.maxPodsToEvictTotal).
WithMaxPodsToEvictPerNode(test.maxPodsToEvictPerNode).
WithEvictionFailureEventNotification(test.evictionFailureEventNotification).
WithMaxPodsToEvictPerNamespace(test.maxPodsToEvictPerNamespace),
)
if err != nil {
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
}
stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}
if actualErr := podEvictor.EvictPod(ctx, test.pod, EvictOptions{}); actualErr != nil && actualErr.Error() != test.expectedError.Error() {
t.Errorf("Expected error: %v, got: %v", test.expectedError, actualErr)
}
if evictions := podEvictor.NodeEvicted(stubNode); evictions != test.expectedNodeEvictions {
t.Errorf("Expected %d node evictions, got %d instead", test.expectedNodeEvictions, evictions)
}
if evictions := podEvictor.TotalEvicted(); evictions != test.expectedTotalEvictions {
t.Errorf("Expected %d total evictions, got %d instead", test.expectedTotalEvictions, evictions)
}
// Assert that the events are correct.
assertEqualEvents(t, test.events, eventRecorder.Events)
})
}
}
func TestEvictionRequestsCacheCleanup(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
ownerRef1 := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.Namespace = "dev"
pod.ObjectMeta.OwnerReferences = ownerRef1
}
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
updatePod(pod)
pod.Annotations = map[string]string{
EvictionRequestAnnotationKey: "",
}
}
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
client := fakeclientset.NewSimpleClientset(node1, p1, p2, p3, p4)
sharedInformerFactory := informers.NewSharedInformerFactory(client, 0)
_, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
podEvictor, err := NewPodEvictor(
ctx,
client,
eventRecorder,
sharedInformerFactory.Core().V1().Pods().Informer(),
initFeatureGates(),
nil,
)
if err != nil {
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
}
client.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
podName := eviction.GetName()
if podName == "p1" || podName == "p2" {
return true, nil, &apierrors.StatusError{
ErrStatus: metav1.Status{
Reason: metav1.StatusReasonTooManyRequests,
Message: "Eviction triggered evacuation",
},
}
}
return true, nil, nil
}
}
return false, nil, nil
})
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor.EvictPod(ctx, p1, EvictOptions{})
podEvictor.EvictPod(ctx, p2, EvictOptions{})
podEvictor.EvictPod(ctx, p3, EvictOptions{})
podEvictor.EvictPod(ctx, p4, EvictOptions{})
klog.Infof("2 evictions in background expected, 2 normal evictions")
if total := podEvictor.TotalEvictionRequests(); total != 2 {
t.Fatalf("Expected %v total eviction requests, got %v instead", 2, total)
}
if total := podEvictor.TotalEvicted(); total != 2 {
t.Fatalf("Expected %v total evictions, got %v instead", 2, total)
}
klog.Infof("2 evictions in background assumed. Wait for few seconds and check the assumed requests timed out")
time.Sleep(2 * time.Second)
klog.Infof("Checking the assumed requests timed out and were deleted")
// Set the timeout to 1s so the cleaning can be tested
podEvictor.erCache.assumedRequestTimeoutSeconds = 1
podEvictor.erCache.cleanCache(ctx)
if totalERs := podEvictor.TotalEvictionRequests(); totalERs > 0 {
t.Fatalf("Expected 0 eviction requests, got %v instead", totalERs)
}
}
func assertEqualEvents(t *testing.T, expected []string, actual <-chan string) {
t.Logf("Assert for events: %v", expected)
c := time.After(wait.ForeverTestTimeout)
for _, e := range expected {
select {
case a := <-actual:
if !reflect.DeepEqual(a, e) {
t.Errorf("Expected event %q, got %q instead", e, a)
}
case <-c:
t.Errorf("Expected event %q, got nothing", e)
// continue iterating to print all expected events
}
}
for {
select {
case a := <-actual:
t.Errorf("Unexpected event: %q", a)
default:
return // No more events, as expected.
}
}
}


@@ -0,0 +1,59 @@
package evictions
import (
policy "k8s.io/api/policy/v1"
)
type Options struct {
policyGroupVersion string
dryRun bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
maxPodsToEvictTotal *uint
evictionFailureEventNotification bool
metricsEnabled bool
}
// NewOptions returns an Options with default values.
func NewOptions() *Options {
return &Options{
policyGroupVersion: policy.SchemeGroupVersion.String(),
}
}
func (o *Options) WithPolicyGroupVersion(policyGroupVersion string) *Options {
o.policyGroupVersion = policyGroupVersion
return o
}
func (o *Options) WithDryRun(dryRun bool) *Options {
o.dryRun = dryRun
return o
}
func (o *Options) WithMaxPodsToEvictPerNode(maxPodsToEvictPerNode *uint) *Options {
o.maxPodsToEvictPerNode = maxPodsToEvictPerNode
return o
}
func (o *Options) WithMaxPodsToEvictPerNamespace(maxPodsToEvictPerNamespace *uint) *Options {
o.maxPodsToEvictPerNamespace = maxPodsToEvictPerNamespace
return o
}
func (o *Options) WithMaxPodsToEvictTotal(maxPodsToEvictTotal *uint) *Options {
o.maxPodsToEvictTotal = maxPodsToEvictTotal
return o
}
func (o *Options) WithMetricsEnabled(metricsEnabled bool) *Options {
o.metricsEnabled = metricsEnabled
return o
}
func (o *Options) WithEvictionFailureEventNotification(evictionFailureEventNotification *bool) *Options {
if evictionFailureEventNotification != nil {
o.evictionFailureEventNotification = *evictionFailureEventNotification
}
return o
}
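// Example of chaining the builder (an illustrative sketch only, mirroring how the
// pod evictor tests above construct their Options; utilptr here stands for the
// k8s.io/utils/ptr package used in those tests):
//
//	opts := NewOptions().
//		WithDryRun(true).
//		WithMaxPodsToEvictPerNode(utilptr.To[uint](5)).
//		WithMaxPodsToEvictTotal(utilptr.To[uint](10)).
//		WithMetricsEnabled(true)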


@@ -0,0 +1,151 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metricscollector
import (
"context"
"fmt"
"math"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
listercorev1 "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
utilptr "k8s.io/utils/ptr"
)
const (
beta float64 = 0.9
)
type MetricsCollector struct {
nodeLister listercorev1.NodeLister
metricsClientset metricsclient.Interface
nodeSelector labels.Selector
nodes map[string]map[v1.ResourceName]*resource.Quantity
mu sync.RWMutex
// hasSynced signals at least one sync succeeded
hasSynced bool
}
func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset metricsclient.Interface, nodeSelector labels.Selector) *MetricsCollector {
return &MetricsCollector{
nodeLister: nodeLister,
metricsClientset: metricsClientset,
nodeSelector: nodeSelector,
nodes: make(map[string]map[v1.ResourceName]*resource.Quantity),
}
}
func (mc *MetricsCollector) Run(ctx context.Context) {
wait.NonSlidingUntil(func() {
mc.Collect(ctx)
}, 5*time.Second, ctx.Done())
}
// During experiments, the rounding-to-int error causes weightedAverage to never
// reach the target value exactly, even when it is applied many times in a row;
// the difference between the limit and the computed average settles within 5 units.
// Since the observed value is expected to change over time anyway, the weighted
// average never gets a chance to fully converge, which makes this rounding error
// negligible. The speed of convergence depends on how often the metrics collector
// syncs with the current value; currently, the interval is set to 5s.
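// Worked example (added for clarity, using the same numbers as the collector's
// unit test below): with beta=0.9, a previous value of 1400m CPU and a new sample
// of 500m gives 0.9*1400 + 0.1*500 = 1310; a following sample of 900m then gives
// 0.9*1310 + 0.1*900 = 1269. Each call closes only 10% of the remaining gap, which
// is why the average needs many samples to approach a steady observed value.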
func weightedAverage(prevValue, value int64) int64 {
return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
}
func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*resource.Quantity, error) {
mc.mu.RLock()
defer mc.mu.RUnlock()
allNodesUsage := make(map[string]map[v1.ResourceName]*resource.Quantity)
for nodeName := range mc.nodes {
allNodesUsage[nodeName] = map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
}
}
return allNodesUsage, nil
}
func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resource.Quantity, error) {
mc.mu.RLock()
defer mc.mu.RUnlock()
if _, exists := mc.nodes[node.Name]; !exists {
klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node))
return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
}
return map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
}, nil
}
func (mc *MetricsCollector) HasSynced() bool {
return mc.hasSynced
}
func (mc *MetricsCollector) MetricsClient() metricsclient.Interface {
return mc.metricsClientset
}
func (mc *MetricsCollector) Collect(ctx context.Context) error {
mc.mu.Lock()
defer mc.mu.Unlock()
nodes, err := mc.nodeLister.List(mc.nodeSelector)
if err != nil {
return fmt.Errorf("unable to list nodes: %v", err)
}
for _, node := range nodes {
metrics, err := mc.metricsClientset.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{})
if err != nil {
klog.ErrorS(err, "Error fetching metrics", "node", node.Name)
// No entry -> duplicate the previous value -> do nothing as beta*PV + (1-beta)*PV = PV
continue
}
if _, exists := mc.nodes[node.Name]; !exists {
mc.nodes[node.Name] = map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
}
} else {
// get MilliValue to reduce loss of precision
mc.nodes[node.Name][v1.ResourceCPU].SetMilli(
weightedAverage(mc.nodes[node.Name][v1.ResourceCPU].MilliValue(), metrics.Usage.Cpu().MilliValue()),
)
mc.nodes[node.Name][v1.ResourceMemory].Set(
weightedAverage(mc.nodes[node.Name][v1.ResourceMemory].Value(), metrics.Usage.Memory().Value()),
)
}
}
mc.hasSynced = true
return nil
}
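// Typical wiring (an illustrative sketch based on the unit tests that follow, not
// part of the original file):
//
//	collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
//	go collector.Run(ctx)                   // re-collects every 5 seconds until ctx is done
//	usage, err := collector.NodeUsage(node) // smoothed cpu/memory usage for a single node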


@@ -0,0 +1,141 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metricscollector
import (
"context"
"math"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/informers"
fakeclientset "k8s.io/client-go/kubernetes/fake"
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
"sigs.k8s.io/descheduler/test"
)
func checkCpuNodeUsage(t *testing.T, usage map[v1.ResourceName]*resource.Quantity, millicpu int64) {
t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
if usage[v1.ResourceCPU].MilliValue() != millicpu {
t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())
}
}
func TestMetricsCollector(t *testing.T) {
gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
metricsClientset := fakemetricsclient.NewSimpleClientset()
metricsClientset.Tracker().Create(gvr, n1metrics, "")
metricsClientset.Tracker().Create(gvr, n2metrics, "")
metricsClientset.Tracker().Create(gvr, n3metrics, "")
ctx := context.TODO()
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
t.Logf("Set initial node cpu usage to 1400")
collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
collector.Collect(context.TODO())
nodesUsage, _ := collector.NodeUsage(n2)
checkCpuNodeUsage(t, nodesUsage, 1400)
allnodesUsage, _ := collector.AllNodesUsage()
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)
t.Logf("Set current node cpu usage to 500")
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(500, resource.DecimalSI)
metricsClientset.Tracker().Update(gvr, n2metrics, "")
collector.Collect(context.TODO())
nodesUsage, _ = collector.NodeUsage(n2)
checkCpuNodeUsage(t, nodesUsage, 1310)
allnodesUsage, _ = collector.AllNodesUsage()
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1310)
t.Logf("Set current node cpu usage to 900")
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
metricsClientset.Tracker().Update(gvr, n2metrics, "")
collector.Collect(context.TODO())
nodesUsage, _ = collector.NodeUsage(n2)
checkCpuNodeUsage(t, nodesUsage, 1269)
allnodesUsage, _ = collector.AllNodesUsage()
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1269)
}
func TestMetricsCollectorConvergence(t *testing.T) {
gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
metricsClientset := fakemetricsclient.NewSimpleClientset()
metricsClientset.Tracker().Create(gvr, n1metrics, "")
metricsClientset.Tracker().Create(gvr, n2metrics, "")
metricsClientset.Tracker().Create(gvr, n3metrics, "")
ctx := context.TODO()
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
t.Logf("Set initial node cpu usage to 1400")
collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
collector.Collect(context.TODO())
nodesUsage, _ := collector.NodeUsage(n2)
checkCpuNodeUsage(t, nodesUsage, 1400)
allnodesUsage, _ := collector.AllNodesUsage()
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)
t.Logf("Set current node cpu/memory usage to 900/1614978816 and wait until it converges to it")
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
n2metrics.Usage[v1.ResourceMemory] = *resource.NewQuantity(1614978816, resource.BinarySI)
metricsClientset.Tracker().Update(gvr, n2metrics, "")
converged := false
for i := 0; i < 300; i++ {
collector.Collect(context.TODO())
nodesUsage, _ = collector.NodeUsage(n2)
if math.Abs(float64(900-nodesUsage[v1.ResourceCPU].MilliValue())) < 6 && math.Abs(float64(1614978816-nodesUsage[v1.ResourceMemory].Value())) < 6 {
t.Logf("Node cpu/memory usage converged to 900+-5/1614978816+-5")
converged = true
break
}
t.Logf("The current node usage: cpu=%v, memory=%v", nodesUsage[v1.ResourceCPU].MilliValue(), nodesUsage[v1.ResourceMemory].Value())
}
if !converged {
t.Fatalf("The node usage did not converged to 900+-1")
}
}


@@ -18,20 +18,24 @@ package node
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"sync/atomic"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
listersv1 "k8s.io/client-go/listers/core/v1" listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2" "k8s.io/klog/v2"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
) )
const workersCount = 100
// ReadyNodes returns ready nodes irrespective of whether they are // ReadyNodes returns ready nodes irrespective of whether they are
// schedulable or not. // schedulable or not.
func ReadyNodes(ctx context.Context, client clientset.Interface, nodeLister listersv1.NodeLister, nodeSelector string) ([]*v1.Node, error) { func ReadyNodes(ctx context.Context, client clientset.Interface, nodeLister listersv1.NodeLister, nodeSelector string) ([]*v1.Node, error) {
@@ -104,90 +108,96 @@ func IsReady(node *v1.Node) bool {
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled. // This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
// This function currently considers a subset of the Kubernetes Scheduler's predicates when // This function currently considers a subset of the Kubernetes Scheduler's predicates when
// deciding if a pod would fit on a node, but more predicates may be added in the future. // deciding if a pod would fit on a node, but more predicates may be added in the future.
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) []error { // There should be no methods to modify nodes or pods in this method.
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) error {
// Check node selector and required affinity // Check node selector and required affinity
var errors []error
if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil { if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
errors = append(errors, err) return err
} else if !ok { } else if !ok {
errors = append(errors, fmt.Errorf("pod node selector does not match the node label")) return errors.New("pod node selector does not match the node label")
} }
// Check taints (we only care about NoSchedule and NoExecute taints) // Check taints (we only care about NoSchedule and NoExecute taints)
ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool { ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
}) })
if !ok { if !ok {
errors = append(errors, fmt.Errorf("pod does not tolerate taints on the node")) return errors.New("pod does not tolerate taints on the node")
} }
// Check if the pod can fit on a node based on its requests // Check if the pod can fit on a node based on its requests
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name { if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
if ok, reqErrors := fitsRequest(nodeIndexer, pod, node); !ok { if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
errors = append(errors, reqErrors...) return reqError
} }
} }
// Check if node is schedulable // Check if node is schedulable
if IsNodeUnschedulable(node) { if IsNodeUnschedulable(node) {
errors = append(errors, fmt.Errorf("node is not schedulable")) return errors.New("node is not schedulable")
} }
// Check if pod matches inter-pod anti-affinity rule of pod on node // Check if pod matches inter-pod anti-affinity rule of pod on node
if match, err := podMatchesInterPodAntiAffinity(nodeIndexer, pod, node); err != nil { if match, err := podMatchesInterPodAntiAffinity(nodeIndexer, pod, node); err != nil {
errors = append(errors, err) return err
} else if match { } else if match {
errors = append(errors, fmt.Errorf("pod matches inter-pod anti-affinity rule of other pod on node")) return errors.New("pod matches inter-pod anti-affinity rule of other pod on node")
} }
return errors return nil
}
func podFitsNodes(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node, excludeFilter func(pod *v1.Pod, node *v1.Node) bool) bool {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var filteredLen int32
checkNode := func(i int) {
node := nodes[i]
if excludeFilter != nil && excludeFilter(pod, node) {
return
}
err := NodeFit(nodeIndexer, pod, node)
if err == nil {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
atomic.AddInt32(&filteredLen, 1)
cancel()
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node), "err", err.Error())
}
}
// Stop searching for more nodes once a fitting node is found.
workqueue.ParallelizeUntil(ctx, workersCount, len(nodes), checkNode)
return filteredLen > 0
} }
// PodFitsAnyOtherNode checks if the given pod will fit any of the given nodes, besides the node // PodFitsAnyOtherNode checks if the given pod will fit any of the given nodes, besides the node
// the pod is already running on. The predicates used to determine if the pod will fit can be found in the NodeFit function. // the pod is already running on. The predicates used to determine if the pod will fit can be found in the NodeFit function.
func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool { func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
for _, node := range nodes { return podFitsNodes(nodeIndexer, pod, nodes, func(pod *v1.Pod, node *v1.Node) bool {
// Skip node pod is already on return pod.Spec.NodeName == node.Name
if node.Name == pod.Spec.NodeName { })
continue
}
errors := NodeFit(nodeIndexer, pod, node)
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
}
klog.V(4).InfoS("Pod does not fit on any other node",
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
}
return false
} }
// PodFitsAnyNode checks if the given pod will fit any of the given nodes. The predicates used // PodFitsAnyNode checks if the given pod will fit any of the given nodes. The predicates used
// to determine if the pod will fit can be found in the NodeFit function. // to determine if the pod will fit can be found in the NodeFit function.
func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool { func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
for _, node := range nodes { return podFitsNodes(nodeIndexer, pod, nodes, nil)
errors := NodeFit(nodeIndexer, pod, node)
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
}
klog.V(4).InfoS("Pod does not fit on any node",
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
}
return false
} }
// PodFitsCurrentNode checks if the given pod will fit onto the given node. The predicates used // PodFitsCurrentNode checks if the given pod will fit onto the given node. The predicates used
// to determine if the pod will fit can be found in the NodeFit function. // to determine if the pod will fit can be found in the NodeFit function.
func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) bool { func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) bool {
errors := NodeFit(nodeIndexer, pod, node) err := NodeFit(nodeIndexer, pod, node)
if len(errors) == 0 { if err == nil {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node)) klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true return true
} }
klog.V(4).InfoS("Pod does not fit on current node", klog.V(4).InfoS("Pod does not fit on current node",
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error()) "pod", klog.KObj(pod), "node", klog.KObj(node), "error", err)
return false return false
} }
@@ -200,9 +210,7 @@ func IsNodeUnschedulable(node *v1.Node) bool {
// fitsRequest determines if a pod can fit on a node based on its resource requests. It returns true if // fitsRequest determines if a pod can fit on a node based on its resource requests. It returns true if
// the pod will fit. // the pod will fit.
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, []error) { func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
var insufficientResources []error
// Get pod requests // Get pod requests
podRequests, _ := utils.PodRequestsAndLimits(pod) podRequests, _ := utils.PodRequestsAndLimits(pod)
resourceNames := make([]v1.ResourceName, 0, len(podRequests)) resourceNames := make([]v1.ResourceName, 0, len(podRequests))
@@ -210,36 +218,41 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
resourceNames = append(resourceNames, name) resourceNames = append(resourceNames, name)
} }
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames) availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames,
func(pod *v1.Pod) (v1.ResourceList, error) {
req, _ := utils.PodRequestsAndLimits(pod)
return req, nil
},
)
if err != nil { if err != nil {
return false, []error{err} return false, err
} }
podFitsOnNode := true
for _, resource := range resourceNames { for _, resource := range resourceNames {
podResourceRequest := podRequests[resource] podResourceRequest := podRequests[resource]
availableResource, ok := availableResources[resource] availableResource, ok := availableResources[resource]
if !ok || podResourceRequest.MilliValue() > availableResource.MilliValue() { if !ok || podResourceRequest.MilliValue() > availableResource.MilliValue() {
insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", resource)) return false, fmt.Errorf("insufficient %v", resource)
podFitsOnNode = false
} }
} }
// check pod num; at least one pod slot must be available // check pod num; at least one pod slot must be available
if availableResources[v1.ResourcePods].MilliValue() <= 0 { if availableResources[v1.ResourcePods].MilliValue() <= 0 {
insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", v1.ResourcePods)) return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
podFitsOnNode = false
} }
return podFitsOnNode, insufficientResources return true, nil
} }
// nodeAvailableResources returns resources mapped to the quantity available on the node. // nodeAvailableResources returns resources mapped to the quantity available on the node.
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName) (map[v1.ResourceName]*resource.Quantity, error) { func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil) podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
nodeUtilization := NodeUtilization(podsOnNode, resourceNames) nodeUtilization, err := NodeUtilization(podsOnNode, resourceNames, podUtilization)
if err != nil {
return nil, err
}
remainingResources := map[v1.ResourceName]*resource.Quantity{ remainingResources := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI), v1.ResourceCPU: resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI), v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
@@ -260,31 +273,34 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
} }
// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated. // NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity { func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
totalReqs := map[v1.ResourceName]*resource.Quantity{ totalUtilization := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI), v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI), v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI), v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
} }
for _, name := range resourceNames { for _, name := range resourceNames {
if !IsBasicResource(name) { if !IsBasicResource(name) {
totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI) totalUtilization[name] = resource.NewQuantity(0, resource.DecimalSI)
} }
} }
for _, pod := range pods { for _, pod := range pods {
req, _ := utils.PodRequestsAndLimits(pod) podUtil, err := podUtilization(pod)
if err != nil {
return nil, err
}
for _, name := range resourceNames { for _, name := range resourceNames {
quantity, ok := req[name] quantity, ok := podUtil[name]
if ok && name != v1.ResourcePods { if ok && name != v1.ResourcePods {
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero, // As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y. // the format of the quantity will be updated to the format of y.
totalReqs[name].Add(quantity) totalUtilization[name].Add(quantity)
} }
} }
} }
return totalReqs return totalUtilization, nil
} }
// IsBasicResource checks if resource is basic native. // IsBasicResource checks if resource is basic native.
@@ -343,9 +359,30 @@ func podMatchesInterPodAntiAffinity(nodeIndexer podutil.GetPodsAssignedToNodeFun
if err != nil { if err != nil {
return false, fmt.Errorf("error listing all pods: %v", err) return false, fmt.Errorf("error listing all pods: %v", err)
} }
assignedPodsInNamespace := podutil.GroupByNamespace(podsOnNode)
podsInANamespace := podutil.GroupByNamespace(podsOnNode) for _, term := range utils.GetPodAntiAffinityTerms(pod.Spec.Affinity.PodAntiAffinity) {
nodeMap := utils.CreateNodeMap([]*v1.Node{node}) namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
klog.ErrorS(err, "Unable to convert LabelSelector into Selector")
return false, err
}
return utils.CheckPodsWithAntiAffinityExist(pod, podsInANamespace, nodeMap), nil for namespace := range namespaces {
for _, assignedPod := range assignedPodsInNamespace[namespace] {
if assignedPod.Name == pod.Name || !utils.PodMatchesTermsNamespaceAndSelector(assignedPod, namespaces, selector) {
klog.V(4).InfoS("Pod doesn't match inter-pod anti-affinity rule of assigned pod on node", "candidatePod", klog.KObj(pod), "assignedPod", klog.KObj(assignedPod))
continue
}
if _, ok := node.Labels[term.TopologyKey]; ok {
klog.V(1).InfoS("Pod matches inter-pod anti-affinity rule of assigned pod on node", "candidatePod", klog.KObj(pod), "assignedPod", klog.KObj(assignedPod))
return true, nil
}
}
}
}
return false, nil
} }


@@ -19,6 +19,7 @@ package node
import ( import (
"context" "context"
"errors" "errors"
"sync"
"testing" "testing"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@@ -230,7 +231,7 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
nodeTaintValue := "gpu" nodeTaintValue := "gpu"
// Staging node has no scheduling restrictions, but the pod always starts here and PodFitsAnyOtherNode() doesn't take into account the node the pod is running on. // Staging node has no scheduling restrictions, but the pod always starts here and PodFitsAnyOtherNode() doesn't take into account the node the pod is running on.
nodeNames := []string{"node1", "node2", "stagingNode"} nodeNames := []string{"node1", "node2", "stagingNode", "node4"}
tests := []struct { tests := []struct {
description string description string
@@ -716,6 +717,151 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
}, },
success: false, success: false,
}, },
{
description: "There are four nodes. One node has a taint, and the other three nodes do not meet the resource requirements, should fail",
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
test.BuildTestNode(nodeNames[1], 3000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 3000, 8*1000*1000*1000, 12, nil),
test.BuildTestNode(nodeNames[3], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
}),
},
success: false,
},
{
description: "There are four nodes. First node has a taint, second node has no label, third node do not meet the resource requirements, just fourth node meets the requirements, should success",
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
test.BuildTestNode(nodeNames[1], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 1000, 8*1000*1000*1000, 12, nil),
test.BuildTestNode(nodeNames[3], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
}),
},
success: true,
},
{
description: "There are four nodes. First node has a taint, second node has no label, third node do not meet the resource requirements, fourth node is the one where the pod is located, should fail",
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[3], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
test.BuildTestNode(nodeNames[1], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 1000, 8*1000*1000*1000, 12, nil),
test.BuildTestNode(nodeNames[3], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
}),
},
success: false,
},
} }
for _, tc := range tests { for _, tc := range tests {
@@ -753,12 +899,60 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
} }
} }
func TestPodFitsNodes(t *testing.T) {
nodeNames := []string{"node1", "node2", "node3", "node4"}
pod := test.BuildTestPod("p1", 950, 2*1000*1000*1000, nodeNames[0], nil)
nodes := []*v1.Node{
test.BuildTestNode(nodeNames[0], 1000, 8*1000*1000*1000, 12, nil),
test.BuildTestNode(nodeNames[1], 200, 8*1000*1000*1000, 12, nil),
test.BuildTestNode(nodeNames[2], 300, 8*1000*1000*1000, 12, nil),
test.BuildTestNode(nodeNames[3], 400, 8*1000*1000*1000, 12, nil),
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range nodes {
objs = append(objs, node)
}
objs = append(objs, pod)
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
var nodesTraversed sync.Map
podFitsNodes(getPodsAssignedToNode, pod, nodes, func(pod *v1.Pod, node *v1.Node) bool {
nodesTraversed.Store(node.Name, node)
return true
})
for _, node := range nodes {
if _, exists := nodesTraversed.Load(node.Name); !exists {
t.Errorf("Node %v was not proccesed", node.Name)
}
}
}
func TestNodeFit(t *testing.T) { func TestNodeFit(t *testing.T) {
node := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, func(node *v1.Node) { node := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{ node.ObjectMeta.Labels = map[string]string{
"region": "main-region", "region": "main-region",
} }
}) })
nodeNolabel := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, nil)
tests := []struct { tests := []struct {
description string description string
pod *v1.Pod pod *v1.Pod
@@ -767,7 +961,7 @@ func TestNodeFit(t *testing.T) {
err error err error
}{ }{
{ {
description: "insufficient cpu", description: "Insufficient cpu",
pod: test.BuildTestPod("p1", 10000, 2*1000*1000*1000, "", nil), pod: test.BuildTestPod("p1", 10000, 2*1000*1000*1000, "", nil),
node: node, node: node,
podsOnNode: []*v1.Pod{ podsOnNode: []*v1.Pod{
@@ -776,7 +970,7 @@ func TestNodeFit(t *testing.T) {
err: errors.New("insufficient cpu"), err: errors.New("insufficient cpu"),
}, },
{ {
description: "insufficient pod num", description: "Insufficient pod num",
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, "", nil), pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, "", nil),
node: node, node: node,
podsOnNode: []*v1.Pod{ podsOnNode: []*v1.Pod{
@@ -786,7 +980,7 @@ func TestNodeFit(t *testing.T) {
err: errors.New("insufficient pods"), err: errors.New("insufficient pods"),
}, },
{ {
description: "matches inter-pod anti-affinity rule of pod on node", description: "Pod matches inter-pod anti-affinity rule of other pod on node",
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"), pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
node: node, node: node,
podsOnNode: []*v1.Pod{ podsOnNode: []*v1.Pod{
@@ -795,11 +989,36 @@ func TestNodeFit(t *testing.T) {
err: errors.New("pod matches inter-pod anti-affinity rule of other pod on node"), err: errors.New("pod matches inter-pod anti-affinity rule of other pod on node"),
}, },
{ {
description: "pod fits on node", description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because pod and other pod is not same namespace",
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
node: node,
podsOnNode: []*v1.Pod{
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, func(pod *v1.Pod) {
pod.Namespace = "test"
}), "foo", "bar"),
},
},
{
description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because other pod not match labels of pod",
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
node: node,
podsOnNode: []*v1.Pod{
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo1", "bar1"),
},
},
{
description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because node have no topologyKey",
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, "node1", nil), "foo", "bar"),
node: nodeNolabel,
podsOnNode: []*v1.Pod{
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo", "bar"),
},
},
{
description: "Pod fits on node",
pod: test.BuildTestPod("p1", 1000, 1000, "", func(pod *v1.Pod) {}), pod: test.BuildTestPod("p1", 1000, 1000, "", func(pod *v1.Pod) {}),
node: node, node: node,
podsOnNode: []*v1.Pod{}, podsOnNode: []*v1.Pod{},
err: nil,
}, },
} }
@@ -824,9 +1043,9 @@ func TestNodeFit(t *testing.T) {
sharedInformerFactory.Start(ctx.Done()) sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done()) sharedInformerFactory.WaitForCacheSync(ctx.Done())
errs := NodeFit(getPodsAssignedToNode, tc.pod, tc.node) err = NodeFit(getPodsAssignedToNode, tc.pod, tc.node)
if (len(errs) == 0 && tc.err != nil) || (len(errs) > 0 && errs[0].Error() != tc.err.Error()) { if (err == nil && tc.err != nil) || (err != nil && err.Error() != tc.err.Error()) {
t.Errorf("Test %#v failed, got %v, expect %v", tc.description, errs, tc.err) t.Errorf("Test %#v failed, got %v, expect %v", tc.description, err, tc.err)
} }
}) })
} }


@@ -39,6 +39,9 @@ type FilterFunc func(*v1.Pod) bool
// as input and returns the pods that assigned to the node. // as input and returns the pods that assigned to the node.
type GetPodsAssignedToNodeFunc func(string, FilterFunc) ([]*v1.Pod, error) type GetPodsAssignedToNodeFunc func(string, FilterFunc) ([]*v1.Pod, error)
// PodUtilizationFnc is a function for getting a pod's utilization, e.g. requested resources or actual utilization from metrics.
type PodUtilizationFnc func(pod *v1.Pod) (v1.ResourceList, error)
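// An example implementation (shown for illustration; it mirrors the closure that the
// node-fit change above passes to nodeAvailableResources) reporting a pod's requests:
//
//	func(pod *v1.Pod) (v1.ResourceList, error) {
//		req, _ := utils.PodRequestsAndLimits(pod)
//		return req, nil
//	}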
// WrapFilterFuncs wraps a set of FilterFunc in one. // WrapFilterFuncs wraps a set of FilterFunc in one.
func WrapFilterFuncs(filters ...FilterFunc) FilterFunc { func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
return func(pod *v1.Pod) bool { return func(pod *v1.Pod) bool {
@@ -99,9 +102,6 @@ func (o *Options) BuildFilterFunc() (FilterFunc, error) {
} }
} }
return func(pod *v1.Pod) bool { return func(pod *v1.Pod) bool {
if o.filter != nil && !o.filter(pod) {
return false
}
if len(o.includedNamespaces) > 0 && !o.includedNamespaces.Has(pod.Namespace) { if len(o.includedNamespaces) > 0 && !o.includedNamespaces.Has(pod.Namespace) {
return false return false
} }
@@ -111,6 +111,9 @@ func (o *Options) BuildFilterFunc() (FilterFunc, error) {
if s != nil && !s.Matches(labels.Set(pod.GetLabels())) { if s != nil && !s.Matches(labels.Set(pod.GetLabels())) {
return false return false
} }
if o.filter != nil && !o.filter(pod) {
return false
}
return true return true
}, nil }, nil
} }


@@ -28,7 +28,6 @@ import (
"k8s.io/klog/v2" "k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
"sigs.k8s.io/descheduler/pkg/api/v1alpha2" "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
"sigs.k8s.io/descheduler/pkg/descheduler/scheme" "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry" "sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
@@ -54,7 +53,7 @@ func decode(policyConfigFile string, policy []byte, client clientset.Interface,
internalPolicy := &api.DeschedulerPolicy{} internalPolicy := &api.DeschedulerPolicy{}
var err error var err error
decoder := scheme.Codecs.UniversalDecoder(v1alpha1.SchemeGroupVersion, v1alpha2.SchemeGroupVersion, api.SchemeGroupVersion) decoder := scheme.Codecs.UniversalDecoder(v1alpha2.SchemeGroupVersion, api.SchemeGroupVersion)
if err := runtime.DecodeInto(decoder, policy, internalPolicy); err != nil { if err := runtime.DecodeInto(decoder, policy, internalPolicy); err != nil {
return nil, fmt.Errorf("failed decoding descheduler's policy config %q: %v", policyConfigFile, err) return nil, fmt.Errorf("failed decoding descheduler's policy config %q: %v", policyConfigFile, err)
} }
@@ -106,6 +105,7 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
EvictSystemCriticalPods: false, EvictSystemCriticalPods: false,
IgnorePvcPods: false, IgnorePvcPods: false,
EvictFailedBarePods: false, EvictFailedBarePods: false,
IgnorePodsWithoutPDB: false,
}, },
} }

File diff suppressed because it is too large.


@@ -21,7 +21,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
"sigs.k8s.io/descheduler/pkg/api/v1alpha2" "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig" "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
componentconfigv1alpha1 "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1" componentconfigv1alpha1 "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
@@ -57,10 +56,8 @@ func init() {
utilruntime.Must(componentconfig.AddToScheme(Scheme)) utilruntime.Must(componentconfig.AddToScheme(Scheme))
utilruntime.Must(componentconfigv1alpha1.AddToScheme(Scheme)) utilruntime.Must(componentconfigv1alpha1.AddToScheme(Scheme))
utilruntime.Must(v1alpha1.AddToScheme(Scheme))
utilruntime.Must(v1alpha2.AddToScheme(Scheme)) utilruntime.Must(v1alpha2.AddToScheme(Scheme))
utilruntime.Must(Scheme.SetVersionPriority( utilruntime.Must(Scheme.SetVersionPriority(
v1alpha2.SchemeGroupVersion, v1alpha2.SchemeGroupVersion,
v1alpha1.SchemeGroupVersion,
)) ))
} }

pkg/features/features.go (new file)

@@ -0,0 +1,49 @@
package features
import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/component-base/featuregate"
)
const (
// Every feature gate should add method here following this template:
//
// // owner: @username
// // kep: kep link
// // alpha: v1.X
// MyFeature featuregate.Feature = "MyFeature"
//
// Feature gates should be listed in alphabetical, case-sensitive
// (upper before any lower case character) order. This reduces the risk
// of code conflicts because changes are more likely to be scattered
// across the file.
// owner: @ingvagabund
// kep: https://github.com/kubernetes-sigs/descheduler/issues/1397
// alpha: v1.31
//
// Enable evictions in background so users can create their own eviction policies
// as an alternative to immediate evictions.
EvictionsInBackground featuregate.Feature = "EvictionsInBackground"
)
func init() {
runtime.Must(DefaultMutableFeatureGate.Add(defaultDeschedulerFeatureGates))
}
// defaultDeschedulerFeatureGates consists of all known descheduler-specific feature keys.
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout descheduler binary.
//
// Entries are separated from each other with blank lines to avoid sweeping gofmt changes
// when adding or removing one entry.
var defaultDeschedulerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
}
// DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate.
// Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this.
// Tests that need to modify feature gates for the duration of their test should use:
//
// defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)()
var DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()
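// Example of checking a gate at runtime (illustrative; DefaultMutableFeatureGate also
// satisfies the read-only featuregate.FeatureGate interface):
//
//	if DefaultMutableFeatureGate.Enabled(EvictionsInBackground) {
//		// evictions-in-background code path
//	}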


@@ -8,6 +8,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types" frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
) )
@@ -18,6 +19,7 @@ type HandleImpl struct {
SharedInformerFactoryImpl informers.SharedInformerFactory SharedInformerFactoryImpl informers.SharedInformerFactory
EvictorFilterImpl frameworktypes.EvictorPlugin EvictorFilterImpl frameworktypes.EvictorPlugin
PodEvictorImpl *evictions.PodEvictor PodEvictorImpl *evictions.PodEvictor
MetricsCollectorImpl *metricscollector.MetricsCollector
} }
var _ frameworktypes.Handle = &HandleImpl{} var _ frameworktypes.Handle = &HandleImpl{}
@@ -26,6 +28,10 @@ func (hi *HandleImpl) ClientSet() clientset.Interface {
return hi.ClientsetImpl return hi.ClientsetImpl
} }
func (hi *HandleImpl) MetricsCollector() *metricscollector.MetricsCollector {
return hi.MetricsCollectorImpl
}
func (hi *HandleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc { func (hi *HandleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
return hi.GetPodsAssignedToNodeFuncImpl return hi.GetPodsAssignedToNodeFuncImpl
} }
@@ -46,10 +52,6 @@ func (hi *HandleImpl) PreEvictionFilter(pod *v1.Pod) bool {
return hi.EvictorFilterImpl.PreEvictionFilter(pod) return hi.EvictorFilterImpl.PreEvictionFilter(pod)
} }
func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool { func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) error {
return hi.PodEvictorImpl.EvictPod(ctx, pod, opts) return hi.PodEvictorImpl.EvictPod(ctx, pod, opts)
} }
func (hi *HandleImpl) NodeLimitExceeded(node *v1.Node) bool {
return hi.PodEvictorImpl.NodeLimitExceeded(node)
}


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2024 The Kubernetes Authors. Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.


@@ -18,6 +18,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -64,6 +65,7 @@ func HaveEvictAnnotation(pod *v1.Pod) bool {
} }
// New builds plugin from its arguments while passing a handle // New builds plugin from its arguments while passing a handle
// nolint: gocyclo
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) { func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
defaultEvictorArgs, ok := args.(*DefaultEvictorArgs) defaultEvictorArgs, ok := args.(*DefaultEvictorArgs)
if !ok { if !ok {
@@ -185,6 +187,28 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
}) })
} }
if defaultEvictorArgs.MinPodAge != nil {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if pod.Status.StartTime == nil || time.Since(pod.Status.StartTime.Time) < defaultEvictorArgs.MinPodAge.Duration {
return fmt.Errorf("pod age is not older than MinPodAge: %s seconds", defaultEvictorArgs.MinPodAge.String())
}
return nil
})
}
if defaultEvictorArgs.IgnorePodsWithoutPDB {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
hasPdb, err := utils.IsPodCoveredByPDB(pod, handle.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister())
if err != nil {
return fmt.Errorf("unable to check if pod is covered by PodDisruptionBudget: %w", err)
}
if !hasPdb {
return fmt.Errorf("no PodDisruptionBudget found for pod")
}
return nil
})
}
return ev, nil return ev, nil
} }
@@ -244,6 +268,15 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
func getPodIndexerByOwnerRefs(indexName string, handle frameworktypes.Handle) (cache.Indexer, error) { func getPodIndexerByOwnerRefs(indexName string, handle frameworktypes.Handle) (cache.Indexer, error) {
podInformer := handle.SharedInformerFactory().Core().V1().Pods().Informer() podInformer := handle.SharedInformerFactory().Core().V1().Pods().Informer()
indexer := podInformer.GetIndexer()
// do not reinitialize the indexer, if it's been defined already
for name := range indexer.GetIndexers() {
if name == indexName {
return indexer, nil
}
}
if err := podInformer.AddIndexers(cache.Indexers{ if err := podInformer.AddIndexers(cache.Indexers{
indexName: func(obj interface{}) ([]string, error) { indexName: func(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod) pod, ok := obj.(*v1.Pod)
@@ -257,6 +290,5 @@ func getPodIndexerByOwnerRefs(indexName string, handle frameworktypes.Handle) (c
return nil, err return nil, err
} }
indexer := podInformer.GetIndexer()
return indexer, nil return indexer, nil
} }


@@ -15,9 +15,14 @@ package defaultevictor
import ( import (
"context" "context"
"fmt"
"testing" "testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
@@ -31,6 +36,22 @@ import (
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
type testCase struct {
description string
pods []*v1.Pod
nodes []*v1.Node
pdbs []*policyv1.PodDisruptionBudget
evictFailedBarePods bool
evictLocalStoragePods bool
evictSystemCriticalPods bool
priorityThreshold *int32
nodeFit bool
minReplicas uint
minPodAge *metav1.Duration
result bool
ignorePodsWithoutPDB bool
}
func TestDefaultEvictorPreEvictionFilter(t *testing.T) { func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil) n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
@@ -39,17 +60,6 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
nodeLabelKey := "datacenter" nodeLabelKey := "datacenter"
nodeLabelValue := "east" nodeLabelValue := "east"
type testCase struct {
description string
pods []*v1.Pod
nodes []*v1.Node
evictFailedBarePods bool
evictLocalStoragePods bool
evictSystemCriticalPods bool
priorityThreshold *int32
nodeFit bool
result bool
}
testCases := []testCase{ testCases := []testCase{
{ {
@@ -305,45 +315,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range test.nodes {
objs = append(objs, node)
}
for _, pod := range test.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
defaultEvictorArgs := &DefaultEvictorArgs{
EvictLocalStoragePods: test.evictLocalStoragePods,
EvictSystemCriticalPods: test.evictSystemCriticalPods,
IgnorePvcPods: false,
EvictFailedBarePods: test.evictFailedBarePods,
PriorityThreshold: &api.PriorityThreshold{
Value: test.priorityThreshold,
},
NodeFit: test.nodeFit,
}
evictorPlugin, err := New(
defaultEvictorArgs,
&frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
SharedInformerFactoryImpl: sharedInformerFactory,
})
evictorPlugin, err := initializePlugin(ctx, test)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
@@ -361,24 +333,13 @@ func TestDefaultEvictorFilter(t *testing.T) {
lowPriority := int32(800)
highPriority := int32(900)
minPodAge := metav1.Duration{Duration: 50 * time.Minute}
nodeTaintKey := "hardware"
nodeTaintValue := "gpu"
ownerRefUUID := uuid.NewUUID()
type testCase struct {
description string
pods []*v1.Pod
nodes []*v1.Node
evictFailedBarePods bool
evictLocalStoragePods bool
evictSystemCriticalPods bool
priorityThreshold *int32
nodeFit bool
minReplicas uint
result bool
}
testCases := []testCase{
{
description: "Failed pod eviction with no ownerRefs",
@@ -749,6 +710,65 @@ func TestDefaultEvictorFilter(t *testing.T) {
},
minReplicas: 2,
result: true,
}, {
description: "minPodAge of 50, pod created 10 minutes ago, no eviction",
pods: []*v1.Pod{
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
podStartTime := metav1.Now().Add(time.Minute * time.Duration(-10))
pod.Status.StartTime = &metav1.Time{Time: podStartTime}
}),
},
minPodAge: &minPodAge,
result: false,
}, {
description: "minPodAge of 50, pod created 60 minutes ago, evicts",
pods: []*v1.Pod{
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
podStartTime := metav1.Now().Add(time.Minute * time.Duration(-60))
pod.Status.StartTime = &metav1.Time{Time: podStartTime}
}),
},
minPodAge: &minPodAge,
result: true,
}, {
description: "nil minPodAge, pod created 60 minutes ago, evicts",
pods: []*v1.Pod{
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
podStartTime := metav1.Now().Add(time.Minute * time.Duration(-60))
pod.Status.StartTime = &metav1.Time{Time: podStartTime}
}),
},
result: true,
}, {
description: "ignorePodsWithoutPDB, pod with no PDBs, no eviction",
pods: []*v1.Pod{
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Labels = map[string]string{
"app": "foo",
}
}),
},
ignorePodsWithoutPDB: true,
result: false,
}, {
description: "ignorePodsWithoutPDB, pod with PDBs, evicts",
pods: []*v1.Pod{
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Labels = map[string]string{
"app": "foo",
}
}),
},
pdbs: []*policyv1.PodDisruptionBudget{
test.BuildTestPDB("pdb1", "foo"),
},
ignorePodsWithoutPDB: true,
result: true,
},
}
@@ -757,46 +777,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range test.nodes {
objs = append(objs, node)
}
for _, pod := range test.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
defaultEvictorArgs := &DefaultEvictorArgs{
EvictLocalStoragePods: test.evictLocalStoragePods,
EvictSystemCriticalPods: test.evictSystemCriticalPods,
IgnorePvcPods: false,
EvictFailedBarePods: test.evictFailedBarePods,
PriorityThreshold: &api.PriorityThreshold{
Value: test.priorityThreshold,
},
NodeFit: test.nodeFit,
MinReplicas: test.minReplicas,
}
evictorPlugin, err := New(
defaultEvictorArgs,
&frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
SharedInformerFactoryImpl: sharedInformerFactory,
})
evictorPlugin, err := initializePlugin(ctx, test)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
@@ -808,3 +789,100 @@ func TestDefaultEvictorFilter(t *testing.T) {
})
}
}
func TestReinitialization(t *testing.T) {
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
ownerRefUUID := uuid.NewUUID()
testCases := []testCase{
{
description: "minReplicas of 2, multiple owners, eviction",
pods: []*v1.Pod{
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = append(test.GetNormalPodOwnerRefList(), test.GetNormalPodOwnerRefList()...)
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
}),
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
}),
},
minReplicas: 2,
result: true,
},
}
for _, test := range testCases {
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
evictorPlugin, err := initializePlugin(ctx, test)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
defaultEvictor, ok := evictorPlugin.(*DefaultEvictor)
if !ok {
t.Fatalf("Unable to initialize as a DefaultEvictor plugin")
}
_, err = New(defaultEvictor.args, defaultEvictor.handle)
if err != nil {
t.Fatalf("Unable to reinitialize the plugin: %v", err)
}
})
}
}
func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin, error) {
var objs []runtime.Object
for _, node := range test.nodes {
objs = append(objs, node)
}
for _, pod := range test.pods {
objs = append(objs, pod)
}
for _, pdb := range test.pdbs {
objs = append(objs, pdb)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
_ = sharedInformerFactory.Policy().V1().PodDisruptionBudgets().Lister()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
defaultEvictorArgs := &DefaultEvictorArgs{
EvictLocalStoragePods: test.evictLocalStoragePods,
EvictSystemCriticalPods: test.evictSystemCriticalPods,
IgnorePvcPods: false,
EvictFailedBarePods: test.evictFailedBarePods,
PriorityThreshold: &api.PriorityThreshold{
Value: test.priorityThreshold,
},
NodeFit: test.nodeFit,
MinReplicas: test.minReplicas,
MinPodAge: test.minPodAge,
IgnorePodsWithoutPDB: test.ignorePodsWithoutPDB,
}
evictorPlugin, err := New(
defaultEvictorArgs,
&frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
SharedInformerFactoryImpl: sharedInformerFactory,
})
if err != nil {
return nil, fmt.Errorf("unable to initialize the plugin: %v", err)
}
return evictorPlugin, nil
}

View File

@@ -19,7 +19,7 @@ import (
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/utils/pointer" utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
) )
@@ -42,6 +42,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
LabelSelector: nil,
PriorityThreshold: nil,
NodeFit: false,
IgnorePodsWithoutPDB: false,
},
},
{
@@ -55,9 +56,10 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
EvictFailedBarePods: true,
LabelSelector: nil,
PriorityThreshold: &api.PriorityThreshold{
Value: pointer.Int32(800),
Value: utilptr.To[int32](800),
},
NodeFit: true,
IgnorePodsWithoutPDB: true,
},
want: &DefaultEvictorArgs{
NodeSelector: "NodeSelector",
@@ -68,9 +70,10 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
EvictFailedBarePods: true,
LabelSelector: nil,
PriorityThreshold: &api.PriorityThreshold{
Value: pointer.Int32(800),
Value: utilptr.To[int32](800),
},
NodeFit: true,
IgnorePodsWithoutPDB: true,
},
},
}

View File

@@ -25,14 +25,16 @@ import (
type DefaultEvictorArgs struct {
metav1.TypeMeta `json:",inline"`
NodeSelector string `json:"nodeSelector"`
NodeSelector string `json:"nodeSelector,omitempty"`
EvictLocalStoragePods bool `json:"evictLocalStoragePods"`
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
EvictDaemonSetPods bool `json:"evictDaemonSetPods"`
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods"`
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
IgnorePvcPods bool `json:"ignorePvcPods"`
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
EvictFailedBarePods bool `json:"evictFailedBarePods"`
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold"`
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
NodeFit bool `json:"nodeFit"`
NodeFit bool `json:"nodeFit,omitempty"`
MinReplicas uint `json:"minReplicas"`
MinReplicas uint `json:"minReplicas,omitempty"`
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
}

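The two new fields close out the argument surface above. As a minimal sketch, not part of the change set, this is how a caller might populate them programmatically; the field names come from the struct shown, the values are illustrative and would normally come from the DeschedulerPolicy:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
)

func main() {
	// Only evict pods older than 50 minutes, and only pods covered by a PDB.
	args := &defaultevictor.DefaultEvictorArgs{
		NodeFit:              true,
		MinPodAge:            &metav1.Duration{Duration: 50 * time.Minute},
		IgnorePodsWithoutPDB: true,
	}
	fmt.Printf("minPodAge=%s ignorePodsWithoutPDB=%v\n", args.MinPodAge.Duration, args.IgnorePodsWithoutPDB)
}
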
View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -41,6 +41,11 @@ func (in *DefaultEvictorArgs) DeepCopyInto(out *DefaultEvictorArgs) {
*out = new(api.PriorityThreshold)
(*in).DeepCopyInto(*out)
}
if in.MinPodAge != nil {
in, out := &in.MinPodAge, &out.MinPodAge
*out = new(v1.Duration)
**out = **in
}
return
}

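The generated DeepCopyInto above allocates a fresh Duration for the new pointer field so the copy never aliases the original. A toy reproduction of that pattern (the Args type below is illustrative, not the generated code):

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Args has one pointer field, mirroring how MinPodAge is handled above.
type Args struct {
	MinPodAge *metav1.Duration
}

func (in *Args) DeepCopyInto(out *Args) {
	*out = *in
	if in.MinPodAge != nil {
		in, out := &in.MinPodAge, &out.MinPodAge
		*out = new(metav1.Duration) // allocate, so the copy owns its own value
		**out = **in
	}
}

func main() {
	orig := &Args{MinPodAge: &metav1.Duration{Duration: 50 * time.Minute}}
	var cp Args
	orig.DeepCopyInto(&cp)
	cp.MinPodAge.Duration = time.Minute // mutating the copy leaves orig untouched
	fmt.Println(orig.MinPodAge.Duration, cp.MinPodAge.Duration)
}
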
View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -38,9 +38,13 @@ const HighNodeUtilizationPluginName = "HighNodeUtilization"
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
type HighNodeUtilization struct {
handle frameworktypes.Handle
args *HighNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool
underutilizationCriteria []interface{}
resourceNames []v1.ResourceName
targetThresholds api.ResourceThresholds
usageClient usageClient
}
var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
@@ -52,6 +56,21 @@ func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (
return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args) return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
} }
targetThresholds := make(api.ResourceThresholds)
setDefaultForThresholds(highNodeUtilizatioArgs.Thresholds, targetThresholds)
resourceNames := getResourceNames(targetThresholds)
underutilizationCriteria := []interface{}{
"CPU", highNodeUtilizatioArgs.Thresholds[v1.ResourceCPU],
"Mem", highNodeUtilizatioArgs.Thresholds[v1.ResourceMemory],
"Pods", highNodeUtilizatioArgs.Thresholds[v1.ResourcePods],
}
for name := range highNodeUtilizatioArgs.Thresholds {
if !nodeutil.IsBasicResource(name) {
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(highNodeUtilizatioArgs.Thresholds[name]))
}
}
podFilter, err := podutil.NewOptions().
WithFilter(handle.Evictor().Filter).
BuildFilterFunc()
@@ -60,9 +79,13 @@ func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (
}
return &HighNodeUtilization{
handle: handle,
args: highNodeUtilizatioArgs,
resourceNames: resourceNames,
targetThresholds: targetThresholds,
underutilizationCriteria: underutilizationCriteria,
podFilter: podFilter,
usageClient: newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc()),
}, nil
}
@@ -73,15 +96,15 @@ func (h *HighNodeUtilization) Name() string {
// Balance extension point implementation for the plugin
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
thresholds := h.args.Thresholds
targetThresholds := make(api.ResourceThresholds)
setDefaultForThresholds(thresholds, targetThresholds)
resourceNames := getResourceNames(targetThresholds)
if err := h.usageClient.sync(nodes); err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error getting node usage: %v", err),
}
}
sourceNodes, highNodes := classifyNodes(
getNodeUsage(nodes, resourceNames, h.handle.GetPodsAssignedToNodeFunc()),
getNodeUsage(nodes, h.usageClient),
getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, h.handle.GetPodsAssignedToNodeFunc(), false),
getNodeThresholds(nodes, h.args.Thresholds, h.targetThresholds, h.resourceNames, false, h.usageClient),
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
},
@@ -94,18 +117,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
})
// log message in one line
keysAndValues := []interface{}{
"CPU", thresholds[v1.ResourceCPU],
"Mem", thresholds[v1.ResourceMemory],
"Pods", thresholds[v1.ResourcePods],
}
for name := range thresholds {
if !nodeutil.IsBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
}
}
klog.V(1).InfoS("Criteria for a node below target utilization", keysAndValues...)
klog.V(1).InfoS("Criteria for a node below target utilization", h.underutilizationCriteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(sourceNodes))
if len(sourceNodes) == 0 {
@@ -147,8 +159,10 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
h.handle.Evictor(),
evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
h.podFilter,
resourceNames,
h.resourceNames,
continueEvictionCond)
continueEvictionCond,
h.usageClient,
)
return nil
}

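The refactor above precomputes the underutilization criteria once in the constructor and hands the slice to the structured logger on every Balance call. A small sketch of that alternating key/value idiom (the criteria values are illustrative):

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("v", "1")
	flag.Parse()

	// Same shape as the precomputed underutilizationCriteria slice above:
	// alternating key/value pairs consumed by klog's structured InfoS.
	criteria := []interface{}{
		"CPU", 20,
		"Mem", 20,
		"Pods", 20,
	}
	klog.V(1).InfoS("Criteria for a node below target utilization", criteria...)
	klog.Flush()
}
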
View File

@@ -25,16 +25,13 @@ import (
policy "k8s.io/api/policy/v1" policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor" "sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types" frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
@@ -115,6 +112,7 @@ func TestHighNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
@@ -167,6 +165,7 @@ func TestHighNodeUtilization(t *testing.T) {
// These won't be evicted.
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
@@ -448,20 +447,16 @@ func TestHighNodeUtilization(t *testing.T) {
for _, pod := range testCase.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
podsForEviction := make(map[string]struct{})
for _, pod := range testCase.evictedPods {
podsForEviction[pod] = struct{}{}
}
fakeClient := fake.NewSimpleClientset(objs...)
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
if err != nil {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}
evictionFailed := false
if len(testCase.evictedPods) > 0 {
@@ -479,50 +474,6 @@ func TestHighNodeUtilization(t *testing.T) {
})
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
eventRecorder := &events.FakeRecorder{}
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
nil,
nil,
testCase.nodes,
false,
eventRecorder,
)
defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
EvictLocalStoragePods: false,
EvictSystemCriticalPods: false,
IgnorePvcPods: false,
EvictFailedBarePods: false,
NodeFit: true,
}
evictorFilter, err := defaultevictor.New(
defaultevictorArgs,
&frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
SharedInformerFactoryImpl: sharedInformerFactory,
},
)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
handle := &frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
SharedInformerFactoryImpl: sharedInformerFactory,
}
plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
Thresholds: testCase.thresholds,
},
@@ -623,55 +574,16 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
eventRecorder := &events.FakeRecorder{}
podEvictor := evictions.NewPodEvictor(
fakeClient,
"policy/v1",
false,
&item.evictionsExpected,
nil,
item.nodes,
false,
eventRecorder,
)
defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
EvictLocalStoragePods: false,
EvictSystemCriticalPods: false,
IgnorePvcPods: false,
EvictFailedBarePods: false,
}
evictorFilter, err := defaultevictor.New(
defaultevictorArgs,
&frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
SharedInformerFactoryImpl: sharedInformerFactory,
},
)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
handle := &frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
SharedInformerFactoryImpl: sharedInformerFactory,
}
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
ctx,
fakeClient,
evictions.NewOptions().WithMaxPodsToEvictPerNode(&item.evictionsExpected),
defaultevictor.DefaultEvictorArgs{},
nil,
)
if err != nil {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}
plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{

View File

@@ -24,6 +24,8 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -36,9 +38,13 @@ const LowNodeUtilizationPluginName = "LowNodeUtilization"
// to calculate nodes' utilization and not the actual resource usage.
type LowNodeUtilization struct {
handle frameworktypes.Handle
args *LowNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool
underutilizationCriteria []interface{}
overutilizationCriteria []interface{}
resourceNames []v1.ResourceName
usageClient usageClient
}
var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
@@ -50,6 +56,30 @@ func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (f
return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args) return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
} }
setDefaultForLNUThresholds(lowNodeUtilizationArgsArgs.Thresholds, lowNodeUtilizationArgsArgs.TargetThresholds, lowNodeUtilizationArgsArgs.UseDeviationThresholds)
underutilizationCriteria := []interface{}{
"CPU", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceCPU],
"Mem", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceMemory],
"Pods", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourcePods],
}
for name := range lowNodeUtilizationArgsArgs.Thresholds {
if !nodeutil.IsBasicResource(name) {
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.Thresholds[name]))
}
}
overutilizationCriteria := []interface{}{
"CPU", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceCPU],
"Mem", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceMemory],
"Pods", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourcePods],
}
for name := range lowNodeUtilizationArgsArgs.TargetThresholds {
if !nodeutil.IsBasicResource(name) {
overutilizationCriteria = append(overutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.TargetThresholds[name]))
}
}
podFilter, err := podutil.NewOptions().
WithFilter(handle.Evictor().Filter).
BuildFilterFunc()
@@ -57,10 +87,26 @@ func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (f
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
}
resourceNames := getResourceNames(lowNodeUtilizationArgsArgs.Thresholds)
var usageClient usageClient
if lowNodeUtilizationArgsArgs.MetricsUtilization.MetricsServer {
if handle.MetricsCollector() == nil {
return nil, fmt.Errorf("metrics client not initialized")
}
usageClient = newActualUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc(), handle.MetricsCollector())
} else {
usageClient = newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc())
}
return &LowNodeUtilization{
handle: handle,
args: lowNodeUtilizationArgsArgs,
underutilizationCriteria: underutilizationCriteria,
overutilizationCriteria: overutilizationCriteria,
resourceNames: resourceNames,
podFilter: podFilter,
usageClient: usageClient,
}, nil
}
@@ -71,43 +117,15 @@ func (l *LowNodeUtilization) Name() string {
// Balance extension point implementation for the plugin
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
useDeviationThresholds := l.args.UseDeviationThresholds
thresholds := l.args.Thresholds
targetThresholds := l.args.TargetThresholds
// check if Pods/CPU/Mem are set, if not, set them to 100
if _, ok := thresholds[v1.ResourcePods]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourcePods] = MinResourcePercentage
targetThresholds[v1.ResourcePods] = MinResourcePercentage
} else {
thresholds[v1.ResourcePods] = MaxResourcePercentage
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
}
}
if _, ok := thresholds[v1.ResourceCPU]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceCPU] = MinResourcePercentage
targetThresholds[v1.ResourceCPU] = MinResourcePercentage
} else {
thresholds[v1.ResourceCPU] = MaxResourcePercentage
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
}
}
if _, ok := thresholds[v1.ResourceMemory]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceMemory] = MinResourcePercentage
targetThresholds[v1.ResourceMemory] = MinResourcePercentage
} else {
thresholds[v1.ResourceMemory] = MaxResourcePercentage
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
}
}
resourceNames := getResourceNames(thresholds)
if err := l.usageClient.sync(nodes); err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error getting node usage: %v", err),
}
}
lowNodes, sourceNodes := classifyNodes(
getNodeUsage(nodes, resourceNames, l.handle.GetPodsAssignedToNodeFunc()),
getNodeUsage(nodes, l.usageClient),
getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, l.handle.GetPodsAssignedToNodeFunc(), useDeviationThresholds),
getNodeThresholds(nodes, l.args.Thresholds, l.args.TargetThresholds, l.resourceNames, l.args.UseDeviationThresholds, l.usageClient),
// The node has to be schedulable (to be able to move workload there)
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
if nodeutil.IsNodeUnschedulable(node) {
@@ -122,31 +140,11 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
)
// log message for nodes with low utilization
underutilizationCriteria := []interface{}{
"CPU", thresholds[v1.ResourceCPU],
"Mem", thresholds[v1.ResourceMemory],
"Pods", thresholds[v1.ResourcePods],
}
for name := range thresholds {
if !nodeutil.IsBasicResource(name) {
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(thresholds[name]))
}
}
klog.V(1).InfoS("Criteria for a node under utilization", underutilizationCriteria...)
klog.V(1).InfoS("Criteria for a node under utilization", l.underutilizationCriteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
// log message for over utilized nodes
overutilizationCriteria := []interface{}{
"CPU", targetThresholds[v1.ResourceCPU],
"Mem", targetThresholds[v1.ResourceMemory],
"Pods", targetThresholds[v1.ResourcePods],
}
for name := range targetThresholds {
if !nodeutil.IsBasicResource(name) {
overutilizationCriteria = append(overutilizationCriteria, string(name), int64(targetThresholds[name]))
}
}
klog.V(1).InfoS("Criteria for a node above target utilization", overutilizationCriteria...)
klog.V(1).InfoS("Criteria for a node above target utilization", l.overutilizationCriteria...)
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(sourceNodes))
if len(lowNodes) == 0 {
@@ -194,8 +192,41 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
l.handle.Evictor(),
evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
l.podFilter,
resourceNames,
l.resourceNames,
continueEvictionCond)
continueEvictionCond,
l.usageClient,
)
return nil
}
func setDefaultForLNUThresholds(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) {
// check if Pods/CPU/Mem are set, if not, set them to 100
if _, ok := thresholds[v1.ResourcePods]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourcePods] = MinResourcePercentage
targetThresholds[v1.ResourcePods] = MinResourcePercentage
} else {
thresholds[v1.ResourcePods] = MaxResourcePercentage
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
}
}
if _, ok := thresholds[v1.ResourceCPU]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceCPU] = MinResourcePercentage
targetThresholds[v1.ResourceCPU] = MinResourcePercentage
} else {
thresholds[v1.ResourceCPU] = MaxResourcePercentage
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
}
}
if _, ok := thresholds[v1.ResourceMemory]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceMemory] = MinResourcePercentage
targetThresholds[v1.ResourceMemory] = MinResourcePercentage
} else {
thresholds[v1.ResourceMemory] = MaxResourcePercentage
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
}
}
}

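As a worked example of what the extracted setDefaultForLNUThresholds above does: any of Pods/CPU/Memory left unset snaps to the maximum percentage (or the minimum when deviation thresholds are enabled), so it can never classify a node on its own. The sketch below re-implements that behavior with a local map type as a stand-in for api.ResourceThresholds, and assumes min/max percentages of 0 and 100; it is illustrative, not the descheduler function itself:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// thresholds stands in for api.ResourceThresholds (resource name -> percentage).
type thresholds map[v1.ResourceName]float64

const (
	minResourcePercentage = 0   // assumed value of MinResourcePercentage
	maxResourcePercentage = 100 // assumed value of MaxResourcePercentage
)

func setDefaults(t, target thresholds, useDeviation bool) {
	for _, name := range []v1.ResourceName{v1.ResourcePods, v1.ResourceCPU, v1.ResourceMemory} {
		if _, ok := t[name]; !ok {
			if useDeviation {
				t[name] = minResourcePercentage
				target[name] = minResourcePercentage
			} else {
				t[name] = maxResourcePercentage
				target[name] = maxResourcePercentage
			}
		}
	}
}

func main() {
	// The user only sets CPU; Pods and Memory default to 100.
	t := thresholds{v1.ResourceCPU: 20}
	target := thresholds{v1.ResourceCPU: 50}
	setDefaults(t, target, false)
	fmt.Println(t, target)
}
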
View File

@@ -21,22 +21,23 @@ import (
"fmt" "fmt"
"testing" "testing"
"sigs.k8s.io/descheduler/pkg/api"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1" policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/events" "k8s.io/metrics/pkg/apis/metrics/v1beta1"
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" "sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
@@ -51,14 +52,17 @@ func TestLowNodeUtilization(t *testing.T) {
notMatchingNodeSelectorValue := "east" notMatchingNodeSelectorValue := "east"
testCases := []struct { testCases := []struct {
name string name string
useDeviationThresholds bool useDeviationThresholds bool
thresholds, targetThresholds api.ResourceThresholds thresholds, targetThresholds api.ResourceThresholds
nodes []*v1.Node nodes []*v1.Node
pods []*v1.Pod pods []*v1.Pod
expectedPodsEvicted uint nodemetricses []*v1beta1.NodeMetrics
evictedPods []string podmetricses []*v1beta1.PodMetrics
evictableNamespaces *api.Namespaces expectedPodsEvicted uint
expectedPodsWithMetricsEvicted uint
evictedPods []string
evictableNamespaces *api.Namespaces
}{ }{
{ {
name: "no evictable pods", name: "no evictable pods",
@@ -106,7 +110,20 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 2401, 1714978816),
test.BuildNodeMetrics(n2NodeName, 401, 1714978816),
test.BuildNodeMetrics(n3NodeName, 10, 1714978816),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 0,
expectedPodsWithMetricsEvicted: 0,
},
{
name: "without priorities",
@@ -156,7 +173,20 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 4,
expectedPodsWithMetricsEvicted: 4,
},
{
name: "without priorities, but excluding namespaces",
@@ -175,18 +205,23 @@ func TestLowNodeUtilization(t *testing.T) {
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
pod.Namespace = "namespace1"
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
pod.Namespace = "namespace1"
}),
test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
pod.Namespace = "namespace1"
}),
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
pod.Namespace = "namespace1"
}),
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
pod.Namespace = "namespace1"
}),
// These won't be evicted.
@@ -216,12 +251,25 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
evictableNamespaces: &api.Namespaces{
Exclude: []string{
"namespace1",
},
},
expectedPodsEvicted: 0,
expectedPodsWithMetricsEvicted: 0,
},
{
name: "without priorities, but include only default namespace",
@@ -242,12 +290,15 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) { test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
// TODO(zhifei92): add ownerRef for pod
pod.Namespace = "namespace3" pod.Namespace = "namespace3"
}), }),
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) { test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
// TODO(zhifei92): add ownerRef for pod
pod.Namespace = "namespace4" pod.Namespace = "namespace4"
}), }),
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) { test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
// TODO(zhifei92): add ownerRef for pod
pod.Namespace = "namespace5" pod.Namespace = "namespace5"
}), }),
// These won't be evicted. // These won't be evicted.
@@ -271,18 +322,32 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
evictableNamespaces: &api.Namespaces{
Include: []string{
"default",
},
},
expectedPodsEvicted: 2,
expectedPodsWithMetricsEvicted: 2,
},
{
name: "without priorities stop when cpu capacity is depleted",
@@ -300,14 +365,14 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 300, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 300, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 300, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 300, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 300, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 300, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 300, n1NodeName, func(pod *v1.Pod) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
@@ -324,16 +389,29 @@ func TestLowNodeUtilization(t *testing.T) {
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 300, n1NodeName, func(pod *v1.Pod) {
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 2100, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before cpu is depleted
expectedPodsEvicted: 3,
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 0, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 4,
expectedPodsWithMetricsEvicted: 4,
},
{
name: "with priorities",
@@ -396,13 +474,27 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 4,
expectedPodsWithMetricsEvicted: 4,
},
{
name: "without priorities evicting best-effort pods only",
@@ -419,7 +511,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
@@ -463,14 +555,28 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 4,
expectedPodsWithMetricsEvicted: 4,
evictedPods: []string{"p1", "p2", "p4", "p5"},
},
{
name: "with extended resource",
@@ -538,6 +644,7 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p8", 0, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
@@ -548,8 +655,21 @@ func TestLowNodeUtilization(t *testing.T) {
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before extended resource is depleted
expectedPodsEvicted: 3,
expectedPodsWithMetricsEvicted: 0,
},
{
name: "with extended resource in some of nodes",
@@ -576,8 +696,21 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
// 0 pods available for eviction because there's no enough extended resource in node2
expectedPodsEvicted: 0,
expectedPodsWithMetricsEvicted: 0,
},
{
name: "without priorities, but only other node is unschedulable",
@@ -620,12 +753,25 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 0,
expectedPodsWithMetricsEvicted: 0,
},
{
name: "without priorities, but only other node doesn't match pod node selector for p4 and p5",
@@ -684,12 +830,23 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
},
expectedPodsEvicted: 3,
expectedPodsWithMetricsEvicted: 3,
},
{
name: "without priorities, but only other node doesn't match pod node affinity for p4 and p5",
@@ -776,13 +933,24 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
},
expectedPodsEvicted: 3,
expectedPodsWithMetricsEvicted: 3,
},
{
name: "deviation thresholds",
@@ -827,124 +995,226 @@ func TestLowNodeUtilization(t *testing.T) {
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 2,
expectedPodsWithMetricsEvicted: 2,
evictedPods: []string{},
},
{
name: "without priorities different evictions for requested and actual resources",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeSelectorKey: notMatchingNodeSelectorValue,
}
}),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with affinity to run in the "west" datacenter upon scheduling
test.SetNormalOwnerRef(pod)
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeSelectorKey,
Operator: "In",
Values: []string{nodeSelectorValue},
},
},
},
},
},
},
}
}),
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with affinity to run in the "west" datacenter upon scheduling
test.SetNormalOwnerRef(pod)
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeSelectorKey,
Operator: "In",
Values: []string{nodeSelectorValue},
},
},
},
},
},
},
}
}),
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 801, 0),
test.BuildPodMetrics("p2", 801, 0),
test.BuildPodMetrics("p3", 801, 0),
},
expectedPodsEvicted: 3,
expectedPodsWithMetricsEvicted: 2,
},
}
for _, tc := range testCases {
testFnc := func(metricsEnabled bool, expectedPodsEvicted uint) func(t *testing.T) {
return func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
var collector *metricscollector.MetricsCollector
if metricsEnabled {
metricsClientset := fakemetricsclient.NewSimpleClientset()
for _, nodemetrics := range tc.nodemetricses {
metricsClientset.Tracker().Create(nodesgvr, nodemetrics, "")
}
for _, podmetrics := range tc.podmetricses {
metricsClientset.Tracker().Create(podsgvr, podmetrics, podmetrics.Namespace)
}
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
collector = metricscollector.NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
err := collector.Collect(ctx)
if err != nil {
t.Fatalf("unable to collect metrics: %v", err)
}
}
podsForEviction := make(map[string]struct{})
for _, pod := range tc.evictedPods {
podsForEviction[pod] = struct{}{}
}
evictionFailed := false
if len(tc.evictedPods) > 0 {
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.CreateAction)
obj := getAction.GetObject()
if eviction, ok := obj.(*policy.Eviction); ok {
if _, exists := podsForEviction[eviction.Name]; exists {
return true, obj, nil
}
evictionFailed = true
return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
}
return true, obj, nil
})
}
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
if err != nil {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}
handle.MetricsCollectorImpl = collector
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
Thresholds: tc.thresholds,
TargetThresholds: tc.targetThresholds,
UseDeviationThresholds: tc.useDeviationThresholds,
EvictableNamespaces: tc.evictableNamespaces,
MetricsUtilization: MetricsUtilization{
MetricsServer: metricsEnabled,
},
},
handle)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)
podsEvicted := podEvictor.TotalEvicted()
if expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %v pods to be evicted but %v got evicted", expectedPodsEvicted, podsEvicted)
}
if evictionFailed {
t.Errorf("Pod evictions failed unexpectedly")
}
}
}
t.Run(tc.name, testFnc(false, tc.expectedPodsEvicted))
t.Run(tc.name+" with metrics enabled", testFnc(true, tc.expectedPodsWithMetricsEvicted))
}
}
@@ -971,11 +1241,14 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
},
}
var uint0, uint1 uint = 0, 1
tests := []struct {
name string
nodes []*v1.Node
pods []*v1.Pod
maxPodsToEvictPerNode *uint
maxPodsToEvictTotal *uint
evictionsExpected uint
}{
{
name: "No taints",
@@ -1031,6 +1304,26 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
},
evictionsExpected: 1,
},
{
name: "Pod which tolerates node taint, set maxPodsToEvictTotal(0), should not be expelled",
nodes: []*v1.Node{n1, n3withTaints},
pods: []*v1.Pod{
// Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
podThatToleratesTaint,
// Node 3 pods
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
},
maxPodsToEvictPerNode: &uint1,
maxPodsToEvictTotal: &uint0,
evictionsExpected: 0,
},
}
for _, item := range tests {
@@ -1045,56 +1338,16 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
ctx,
fakeClient,
evictions.NewOptions().WithMaxPodsToEvictPerNode(&item.evictionsExpected),
defaultevictor.DefaultEvictorArgs{NodeFit: true},
nil,
)
if err != nil {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{


@@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/node"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
@@ -78,14 +77,14 @@ func getNodeThresholds(
nodes []*v1.Node,
lowThreshold, highThreshold api.ResourceThresholds,
resourceNames []v1.ResourceName,
useDeviationThresholds bool,
usageClient usageClient,
) map[string]NodeThresholds {
nodeThresholdsMap := map[string]NodeThresholds{}
averageResourceUsagePercent := api.ResourceThresholds{}
if useDeviationThresholds {
averageResourceUsagePercent = averageNodeBasicresources(nodes, usageClient)
}
for _, node := range nodes {
@@ -121,22 +120,15 @@ func getNodeThresholds(
func getNodeUsage(
nodes []*v1.Node,
usageClient usageClient,
) []NodeUsage {
var nodeUsageList []NodeUsage
for _, node := range nodes {
nodeUsageList = append(nodeUsageList, NodeUsage{
node: node,
usage: usageClient.nodeUtilization(node.Name),
allPods: usageClient.pods(node.Name),
})
}
@@ -214,6 +206,26 @@ func classifyNodes(
return lowNodes, highNodes
}
func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interface{} {
// log message in one line
keysAndValues := []interface{}{}
if quantity, exists := usage[v1.ResourceCPU]; exists {
keysAndValues = append(keysAndValues, "CPU", quantity.MilliValue())
}
if quantity, exists := usage[v1.ResourceMemory]; exists {
keysAndValues = append(keysAndValues, "Mem", quantity.Value())
}
if quantity, exists := usage[v1.ResourcePods]; exists {
keysAndValues = append(keysAndValues, "Pods", quantity.Value())
}
for name := range usage {
if !nodeutil.IsBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), usage[name].Value())
}
}
return keysAndValues
}
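For illustration only (this call site is hypothetical and not part of the change): with CPU, memory and pod counts present in the usage map, the helper above yields alternating keys and values that can be passed straight to a structured log call, using only the v1, resource and klog imports already in this file.

// Hypothetical usage of usageToKeysAndValues, shown as a sketch.
usage := map[v1.ResourceName]*resource.Quantity{
	v1.ResourceCPU:    resource.NewMilliQuantity(1730, resource.DecimalSI),
	v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
	v1.ResourcePods:   resource.NewQuantity(25, resource.DecimalSI),
}
// Produces: "CPU", 1730, "Mem", 3038982964, "Pods", 25
klog.V(1).InfoS("Total capacity to be moved", usageToKeysAndValues(usage)...)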
// evictPodsFromSourceNodes evicts pods based on priority, if all the pods on the node have priority, if not
// evicts them based on QoS as fallback option.
// TODO: @ravig Break this function into smaller functions.
@@ -226,12 +238,12 @@ func evictPodsFromSourceNodes(
podFilter func(pod *v1.Pod) bool,
resourceNames []v1.ResourceName,
continueEviction continueEvictionCond,
usageClient usageClient,
) {
// upper bound on total number of pods/cpu/memory and optional extended resources to be moved
totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{}
for _, resourceName := range resourceNames {
totalAvailableUsage[resourceName] = &resource.Quantity{}
}
taintsOfDestinationNodes := make(map[string][]v1.Taint, len(destinationNodes))
@@ -239,6 +251,10 @@ func evictPodsFromSourceNodes(
taintsOfDestinationNodes[node.node.Name] = node.node.Spec.Taints
for _, name := range resourceNames {
if _, exists := node.usage[name]; !exists {
klog.Errorf("unable to find %q resource in node's %q usage, terminating eviction", name, node.node.Name)
return
}
if _, ok := totalAvailableUsage[name]; !ok {
totalAvailableUsage[name] = resource.NewQuantity(0, resource.DecimalSI)
}
@@ -248,17 +264,7 @@ func evictPodsFromSourceNodes(
}
// log message in one line
klog.V(1).InfoS("Total capacity to be moved", usageToKeysAndValues(totalAvailableUsage)...)
for _, node := range sourceNodes {
klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)
@@ -274,8 +280,14 @@ func evictPodsFromSourceNodes(
klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
err := evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, evictOptions, continueEviction, usageClient)
if err != nil {
switch err.(type) {
case *evictions.EvictionTotalLimitError:
return
default:
}
}
}
}
@@ -289,7 +301,8 @@ func evictPods(
podEvictor frameworktypes.Evictor,
evictOptions evictions.EvictOptions,
continueEviction continueEvictionCond,
usageClient usageClient,
) error {
var excludedNamespaces sets.Set[string]
if evictableNamespaces != nil {
excludedNamespaces = sets.New(evictableNamespaces.Exclude...)
@@ -311,58 +324,67 @@ func evictPods(
continue
}
if !preEvictionFilterWithOptions(pod) {
continue
}
podUsage, err := usageClient.podUsage(pod)
if err != nil {
klog.Errorf("unable to get pod usage for %v/%v: %v", pod.Namespace, pod.Name, err)
continue
}
err = podEvictor.Evict(ctx, pod, evictOptions)
if err == nil {
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
for name := range totalAvailableUsage {
if name == v1.ResourcePods {
nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
} else {
nodeInfo.usage[name].Sub(*podUsage[name])
totalAvailableUsage[name].Sub(*podUsage[name])
}
}
keysAndValues := []interface{}{
"node", nodeInfo.node.Name,
}
keysAndValues = append(keysAndValues, usageToKeysAndValues(nodeInfo.usage)...)
klog.V(3).InfoS("Updated node usage", keysAndValues...)
// check if pods can be still evicted
if !continueEviction(nodeInfo, totalAvailableUsage) {
break
}
continue
}
switch err.(type) {
case *evictions.EvictionNodeLimitError, *evictions.EvictionTotalLimitError:
return err
default:
klog.Errorf("eviction failed: %v", err)
}
}
return nil
}
// sortNodesByUsage sorts nodes based on usage according to the given plugin.
func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
sort.Slice(nodes, func(i, j int) bool {
ti := resource.NewQuantity(0, resource.DecimalSI).Value()
tj := resource.NewQuantity(0, resource.DecimalSI).Value()
for resourceName := range nodes[i].usage {
if resourceName == v1.ResourceCPU {
ti += nodes[i].usage[resourceName].MilliValue()
} else {
ti += nodes[i].usage[resourceName].Value()
}
}
for resourceName := range nodes[j].usage {
if resourceName == v1.ResourceCPU {
tj += nodes[j].usage[resourceName].MilliValue()
} else {
tj += nodes[j].usage[resourceName].Value()
}
}
@@ -424,17 +446,12 @@ func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*
return nonRemovablePods, removablePods
}
func averageNodeBasicresources(nodes []*v1.Node, usageClient usageClient) api.ResourceThresholds {
total := api.ResourceThresholds{}
average := api.ResourceThresholds{}
numberOfNodes := len(nodes)
for _, node := range nodes {
usage := usageClient.nodeUtilization(node.Name)
nodeCapacity := node.Status.Capacity
if len(node.Status.Allocatable) > 0 {
nodeCapacity = node.Status.Allocatable


@@ -25,82 +25,34 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
nodeInfo := &NodeInfo{
NodeUsage: NodeUsage{
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
},
ObjectMeta: metav1.ObjectMeta{Name: name},
},
},
}
apply(nodeInfo)
return nodeInfo
}
var (
lowPriority = int32(0)
highPriority = int32(10000)
extendedResource = v1.ResourceName("example.com/foo")
testNode1 = NodeInfo{
NodeUsage: NodeUsage{
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
},
ObjectMeta: metav1.ObjectMeta{Name: "node1"},
},
usage: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
},
},
}
testNode2 = NodeInfo{
NodeUsage: NodeUsage{
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
},
ObjectMeta: metav1.ObjectMeta{Name: "node2"},
},
usage: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
},
},
}
testNode3 = NodeInfo{
NodeUsage: NodeUsage{
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
},
ObjectMeta: metav1.ObjectMeta{Name: "node3"},
},
usage: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
},
},
}
)
func TestResourceUsagePercentages(t *testing.T) {
@@ -141,26 +93,81 @@ func TestResourceUsagePercentages(t *testing.T) {
t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
}
func TestSortNodesByUsage(t *testing.T) {
tests := []struct {
name string
nodeInfoList []NodeInfo
expectedNodeInfoNames []string
}{
{
name: "cpu memory pods",
nodeInfoList: []NodeInfo{
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
}
}),
},
expectedNodeInfoNames: []string{"node3", "node1", "node2"},
},
{
name: "memory",
nodeInfoList: []NodeInfo{
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
}
}),
},
expectedNodeInfoNames: []string{"node3", "node1", "node2"},
},
}
for _, tc := range tests {
t.Run(tc.name+" descending", func(t *testing.T) {
sortNodesByUsage(tc.nodeInfoList, false) // ascending=false, sort nodes in descending order
for i := 0; i < len(tc.nodeInfoList); i++ {
if tc.nodeInfoList[i].NodeUsage.node.Name != tc.expectedNodeInfoNames[i] {
t.Errorf("Expected %v, got %v", tc.expectedNodeInfoNames[i], tc.nodeInfoList[i].NodeUsage.node.Name)
}
}
})
t.Run(tc.name+" ascending", func(t *testing.T) {
sortNodesByUsage(tc.nodeInfoList, true) // ascending=true, sort nodes in ascending order
size := len(tc.nodeInfoList)
for i := 0; i < size; i++ {
if tc.nodeInfoList[i].NodeUsage.node.Name != tc.expectedNodeInfoNames[size-i-1] {
t.Errorf("Expected %v, got %v", tc.expectedNodeInfoNames[size-i-1], tc.nodeInfoList[i].NodeUsage.node.Name)
}
}
})
}
}


@@ -24,15 +24,16 @@ import (
type LowNodeUtilizationArgs struct {
metav1.TypeMeta `json:",inline"`
UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
Thresholds api.ResourceThresholds `json:"thresholds"`
TargetThresholds api.ResourceThresholds `json:"targetThresholds"`
NumberOfNodes int `json:"numberOfNodes,omitempty"`
MetricsUtilization MetricsUtilization `json:"metricsUtilization,omitempty"`
// Naming this one differently since namespaces are still
// considered while considering resources used by pods
// but then filtered out before eviction
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"`
}
// +k8s:deepcopy-gen=true
@@ -41,10 +42,19 @@ type LowNodeUtilizationArgs struct {
type HighNodeUtilizationArgs struct {
metav1.TypeMeta `json:",inline"`
Thresholds api.ResourceThresholds `json:"thresholds"`
NumberOfNodes int `json:"numberOfNodes,omitempty"`
MetricsUtilization MetricsUtilization `json:"metricsUtilization,omitempty"`
// Naming this one differently since namespaces are still
// considered while considering resources used by pods
// but then filtered out before eviction
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"`
}
// MetricsUtilization allow to consume actual resource utilization from metrics
type MetricsUtilization struct {
// metricsServer enables metrics from a kubernetes metrics server.
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
MetricsServer bool `json:"metricsServer,omitempty"`
}
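A minimal sketch of how the new knob is intended to be set on the plugin arguments; the wrapper function name is hypothetical and the api/core-v1 identifiers are assumed to be imported as they are elsewhere in this API group.

// Illustrative only: opt LowNodeUtilization into metrics-server backed
// utilization instead of summing pod resource requests.
func exampleLowNodeUtilizationArgs() *LowNodeUtilizationArgs {
	return &LowNodeUtilizationArgs{
		Thresholds:         api.ResourceThresholds{v1.ResourceCPU: 20, v1.ResourcePods: 20},
		TargetThresholds:   api.ResourceThresholds{v1.ResourceCPU: 50, v1.ResourcePods: 50},
		MetricsUtilization: MetricsUtilization{MetricsServer: true},
	}
}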


@@ -0,0 +1,201 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeutilization
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
type usageClient interface {
// Both low/high node utilization plugins are expected to invoke sync right
// after Balance method is invoked. There's no cache invalidation so each
// Balance is expected to get the latest data by invoking sync.
sync(nodes []*v1.Node) error
nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity
pods(node string) []*v1.Pod
podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error)
}
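A minimal sketch of the calling convention the interface comment above describes: refresh the client once per Balance invocation, then read the per-node snapshot. The helper below is hypothetical and only assumes the v1 and resource imports already in this file.

// Illustrative only: there is no cache invalidation, so sync must run
// before nodeUtilization/pods are read for the current Balance pass.
func snapshotNodeUsage(client usageClient, nodes []*v1.Node) (map[string]map[v1.ResourceName]*resource.Quantity, error) {
	if err := client.sync(nodes); err != nil {
		return nil, err
	}
	snapshot := map[string]map[v1.ResourceName]*resource.Quantity{}
	for _, node := range nodes {
		snapshot[node.Name] = client.nodeUtilization(node.Name)
	}
	return snapshot, nil
}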
type requestedUsageClient struct {
resourceNames []v1.ResourceName
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
_pods map[string][]*v1.Pod
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
}
var _ usageClient = &requestedUsageClient{}
func newRequestedUsageClient(
resourceNames []v1.ResourceName,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) *requestedUsageClient {
return &requestedUsageClient{
resourceNames: resourceNames,
getPodsAssignedToNode: getPodsAssignedToNode,
}
}
func (s *requestedUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
return s._nodeUtilization[node]
}
func (s *requestedUsageClient) pods(node string) []*v1.Pod {
return s._pods[node]
}
func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
usage := make(map[v1.ResourceName]*resource.Quantity)
for _, resourceName := range s.resourceNames {
usage[resourceName] = utilptr.To[resource.Quantity](utils.GetResourceRequestQuantity(pod, resourceName).DeepCopy())
}
return usage, nil
}
func (s *requestedUsageClient) sync(nodes []*v1.Node) error {
s._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
s._pods = make(map[string][]*v1.Pod)
for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(node.Name, s.getPodsAssignedToNode, nil)
if err != nil {
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
}
nodeUsage, err := nodeutil.NodeUtilization(pods, s.resourceNames, func(pod *v1.Pod) (v1.ResourceList, error) {
req, _ := utils.PodRequestsAndLimits(pod)
return req, nil
})
if err != nil {
return err
}
// store the snapshot of pods from the same (or the closest) node utilization computation
s._pods[node.Name] = pods
s._nodeUtilization[node.Name] = nodeUsage
}
return nil
}
type actualUsageClient struct {
resourceNames []v1.ResourceName
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
metricsCollector *metricscollector.MetricsCollector
_pods map[string][]*v1.Pod
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
}
var _ usageClient = &actualUsageClient{}
func newActualUsageClient(
resourceNames []v1.ResourceName,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
metricsCollector *metricscollector.MetricsCollector,
) *actualUsageClient {
return &actualUsageClient{
resourceNames: resourceNames,
getPodsAssignedToNode: getPodsAssignedToNode,
metricsCollector: metricsCollector,
}
}
func (client *actualUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
return client._nodeUtilization[node]
}
func (client *actualUsageClient) pods(node string) []*v1.Pod {
return client._pods[node]
}
func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
// It's not efficient to keep track of all pods in a cluster when only their fractions is evicted.
// Thus, take the current pod metrics without computing any softening (like e.g. EWMA).
podMetrics, err := client.metricsCollector.MetricsClient().MetricsV1beta1().PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("unable to get podmetrics for %q/%q: %v", pod.Namespace, pod.Name, err)
}
totalUsage := make(map[v1.ResourceName]*resource.Quantity)
for _, container := range podMetrics.Containers {
for _, resourceName := range client.resourceNames {
if resourceName == v1.ResourcePods {
continue
}
if _, exists := container.Usage[resourceName]; !exists {
return nil, fmt.Errorf("pod %v/%v: container %q is missing %q resource", pod.Namespace, pod.Name, container.Name, resourceName)
}
if totalUsage[resourceName] == nil {
totalUsage[resourceName] = utilptr.To[resource.Quantity](container.Usage[resourceName].DeepCopy())
} else {
totalUsage[resourceName].Add(container.Usage[resourceName])
}
}
}
return totalUsage, nil
}
func (client *actualUsageClient) sync(nodes []*v1.Node) error {
client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
client._pods = make(map[string][]*v1.Pod)
nodesUsage, err := client.metricsCollector.AllNodesUsage()
if err != nil {
return err
}
for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(node.Name, client.getPodsAssignedToNode, nil)
if err != nil {
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
}
nodeUsage, ok := nodesUsage[node.Name]
if !ok {
return fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
}
nodeUsage[v1.ResourcePods] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
for _, resourceName := range client.resourceNames {
if _, exists := nodeUsage[resourceName]; !exists {
return fmt.Errorf("unable to find %q resource for collected %q node metric", resourceName, node.Name)
}
}
// store the snapshot of pods from the same (or the closest) node utilization computation
client._pods[node.Name] = pods
client._nodeUtilization[node.Name] = nodeUsage
}
return nil
}


@@ -0,0 +1,139 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeutilization
import (
"context"
"fmt"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/informers"
fakeclientset "k8s.io/client-go/kubernetes/fake"
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
var (
nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
podsgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
)
func updateMetricsAndCheckNodeUtilization(
t *testing.T,
ctx context.Context,
newValue, expectedValue int64,
metricsClientset *fakemetricsclient.Clientset,
collector *metricscollector.MetricsCollector,
usageClient usageClient,
nodes []*v1.Node,
nodeName string,
nodemetrics *v1beta1.NodeMetrics,
) {
t.Logf("Set current node cpu usage to %v", newValue)
nodemetrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(newValue, resource.DecimalSI)
metricsClientset.Tracker().Update(nodesgvr, nodemetrics, "")
err := collector.Collect(ctx)
if err != nil {
t.Fatalf("failed to capture metrics: %v", err)
}
err = usageClient.sync(nodes)
if err != nil {
t.Fatalf("failed to capture a snapshot: %v", err)
}
nodeUtilization := usageClient.nodeUtilization(nodeName)
t.Logf("current node cpu usage: %v\n", nodeUtilization[v1.ResourceCPU].MilliValue())
if nodeUtilization[v1.ResourceCPU].MilliValue() != expectedValue {
t.Fatalf("cpu node usage expected to be %v, got %v instead", expectedValue, nodeUtilization[v1.ResourceCPU].MilliValue())
}
pods := usageClient.pods(nodeName)
fmt.Printf("pods: %#v\n", pods)
if len(pods) != 2 {
t.Fatalf("expected 2 pods for node %v, got %v instead", nodeName, len(pods))
}
}
func TestActualUsageClient(t *testing.T) {
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
p21 := test.BuildTestPod("p21", 400, 0, n2.Name, nil)
p22 := test.BuildTestPod("p22", 400, 0, n2.Name, nil)
p3 := test.BuildTestPod("p3", 400, 0, n3.Name, nil)
nodes := []*v1.Node{n1, n2, n3}
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3, p1, p21, p22, p3)
metricsClientset := fakemetricsclient.NewSimpleClientset()
metricsClientset.Tracker().Create(nodesgvr, n1metrics, "")
metricsClientset.Tracker().Create(nodesgvr, n2metrics, "")
metricsClientset.Tracker().Create(nodesgvr, n3metrics, "")
ctx := context.TODO()
resourceNames := []v1.ResourceName{
v1.ResourceCPU,
v1.ResourceMemory,
}
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Fatalf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
collector := metricscollector.NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
usageClient := newActualUsageClient(
resourceNames,
podsAssignedToNode,
collector,
)
updateMetricsAndCheckNodeUtilization(t, ctx,
1400, 1400,
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
)
updateMetricsAndCheckNodeUtilization(t, ctx,
500, 1310,
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
)
updateMetricsAndCheckNodeUtilization(t, ctx,
900, 1269,
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
)
}


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -37,6 +37,7 @@ func (in *HighNodeUtilizationArgs) DeepCopyInto(out *HighNodeUtilizationArgs) {
(*out)[key] = val
}
}
out.MetricsUtilization = in.MetricsUtilization
if in.EvictableNamespaces != nil {
in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
*out = new(api.Namespaces)
@@ -81,6 +82,7 @@ func (in *LowNodeUtilizationArgs) DeepCopyInto(out *LowNodeUtilizationArgs) {
(*out)[key] = val
}
}
out.MetricsUtilization = in.MetricsUtilization
if in.EvictableNamespaces != nil {
in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
*out = new(api.Namespaces)


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -20,7 +20,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api"
)
@@ -47,7 +47,7 @@ func TestSetDefaults_PodLifeTimeArgs(t *testing.T) {
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
MaxPodLifeTimeSeconds: utilptr.To[uint](600),
States: []string{"Pending"},
},
want: &PodLifeTimeArgs{
@@ -55,7 +55,7 @@ func TestSetDefaults_PodLifeTimeArgs(t *testing.T) {
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
MaxPodLifeTimeSeconds: utilptr.To[uint](600),
States: []string{"Pending"},
},
},


@@ -85,6 +85,24 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return true
}
// Init Container Status Reason
if podLifeTimeArgs.IncludingInitContainers {
for _, containerStatus := range pod.Status.InitContainerStatuses {
if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) {
return true
}
}
}
// Ephemeral Container Status Reason
if podLifeTimeArgs.IncludingEphemeralContainers {
for _, containerStatus := range pod.Status.EphemeralContainerStatuses {
if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) {
return true
}
}
}
// Container Status Reason
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) {
@@ -131,9 +149,19 @@ func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framewo
// in the event that PDB or settings such maxNoOfPodsToEvictPer* prevent too much eviction
podutil.SortPodsBasedOnAge(podsToEvict)
loop:
for _, pod := range podsToEvict {
err := d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
if err == nil {
continue
}
switch err.(type) {
case *evictions.EvictionNodeLimitError:
continue loop
case *evictions.EvictionTotalLimitError:
return nil
default:
klog.Errorf("eviction failed: %v", err)
}
}


@@ -22,18 +22,14 @@ import (
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/test"
)
@@ -157,6 +153,7 @@ func TestPodLifeTime(t *testing.T) {
ignorePvcPods bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
maxPodsToEvictTotal *uint
applyPodsFunc func(pods []*v1.Pod)
}{
{
@@ -315,6 +312,17 @@ func TestPodLifeTime(t *testing.T) {
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedEvictedPodCount: 1,
},
{
description: "1 Oldest pod should be evicted when maxPodsToEvictTotal is set to 1",
args: &PodLifeTimeArgs{
MaxPodLifeTimeSeconds: &maxLifeTime,
},
pods: []*v1.Pod{p1, p2, p9},
nodes: []*v1.Node{node1},
maxPodsToEvictPerNamespace: utilptr.To[uint](2),
maxPodsToEvictTotal: utilptr.To[uint](1),
expectedEvictedPodCount: 1,
},
{
description: "1 Oldest pod should be evicted when maxPodsToEvictPerNode is set to 1",
args: &PodLifeTimeArgs{
@@ -401,6 +409,84 @@ func TestPodLifeTime(t *testing.T) {
}
},
},
{
description: "1 pod with init container status CreateContainerError should not be evicted without includingInitContainers",
args: &PodLifeTimeArgs{
MaxPodLifeTimeSeconds: &maxLifeTime,
States: []string{"CreateContainerError"},
},
pods: []*v1.Pod{p9},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
applyPodsFunc: func(pods []*v1.Pod) {
pods[0].Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerError"},
},
},
}
},
},
{
description: "1 pod with init container status CreateContainerError should be evicted with includingInitContainers",
args: &PodLifeTimeArgs{
MaxPodLifeTimeSeconds: &maxLifeTime,
States: []string{"CreateContainerError"},
IncludingInitContainers: true,
},
pods: []*v1.Pod{p9},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
applyPodsFunc: func(pods []*v1.Pod) {
pods[0].Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerError"},
},
},
}
},
},
{
description: "1 pod with ephemeral container status CreateContainerError should not be evicted without includingEphemeralContainers",
args: &PodLifeTimeArgs{
MaxPodLifeTimeSeconds: &maxLifeTime,
States: []string{"CreateContainerError"},
},
pods: []*v1.Pod{p9},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
applyPodsFunc: func(pods []*v1.Pod) {
pods[0].Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerError"},
},
},
}
},
},
{
description: "1 pod with ephemeral container status CreateContainerError should be evicted with includingEphemeralContainers",
args: &PodLifeTimeArgs{
MaxPodLifeTimeSeconds: &maxLifeTime,
States: []string{"CreateContainerError"},
IncludingEphemeralContainers: true,
},
pods: []*v1.Pod{p9},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
applyPodsFunc: func(pods []*v1.Pod) {
pods[0].Status.EphemeralContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerError"},
},
},
}
},
},
{
description: "1 pod with container status CreateContainerError should be evicted",
args: &PodLifeTimeArgs{
@@ -543,55 +629,21 @@ func TestPodLifeTime(t *testing.T) {
}
fakeClient := fake.NewSimpleClientset(objs...)
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
ctx,
fakeClient,
evictions.NewOptions().
WithMaxPodsToEvictPerNode(tc.maxPodsToEvictPerNode).
WithMaxPodsToEvictPerNamespace(tc.maxPodsToEvictPerNamespace).
WithMaxPodsToEvictTotal(tc.maxPodsToEvictTotal),
defaultevictor.DefaultEvictorArgs{IgnorePvcPods: tc.ignorePvcPods},
nil,
)
if err != nil {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}
plugin, err := New(tc.args, handle)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}


@@ -25,8 +25,10 @@ import (
 type PodLifeTimeArgs struct {
 	metav1.TypeMeta `json:",inline"`
 
-	Namespaces            *api.Namespaces       `json:"namespaces"`
-	LabelSelector         *metav1.LabelSelector `json:"labelSelector"`
-	MaxPodLifeTimeSeconds *uint                 `json:"maxPodLifeTimeSeconds"`
-	States                []string              `json:"states"`
+	Namespaces                   *api.Namespaces       `json:"namespaces,omitempty"`
+	LabelSelector                *metav1.LabelSelector `json:"labelSelector,omitempty"`
+	MaxPodLifeTimeSeconds        *uint                 `json:"maxPodLifeTimeSeconds,omitempty"`
+	States                       []string              `json:"states,omitempty"`
+	IncludingInitContainers      bool                  `json:"includingInitContainers,omitempty"`
+	IncludingEphemeralContainers bool                  `json:"includingEphemeralContainers,omitempty"`
 }
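As a quick illustration of what callers gain from the two new fields, the sketch below fills in the extended arguments. The field names and the "CreateContainerError" state come straight from this diff; the podlifetime import path and the concrete values are assumptions made only for the example.

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime" // assumed plugin package path
)

func main() {
	maxLifeTime := uint(3600) // illustrative: consider pods older than one hour

	args := &podlifetime.PodLifeTimeArgs{
		MaxPodLifeTimeSeconds: &maxLifeTime,
		// Match pods whose containers report this waiting reason,
		// as in the test cases above.
		States: []string{"CreateContainerError"},
		// New opt-in switches: also inspect init container and
		// ephemeral container statuses when matching States.
		IncludingInitContainers:      true,
		IncludingEphemeralContainers: true,
	}

	fmt.Printf("PodLifeTime args: %+v\n", args)
}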

@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated
 
 /*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated
 
 /*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -210,9 +210,17 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
 				// It's assumed all duplicated pods are in the same priority class
 				// TODO(jchaloup): check if the pod has a different node to lend to
 				for _, pod := range pods[upperAvg-1:] {
-					r.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
-					if r.handle.Evictor().NodeLimitExceeded(nodeMap[nodeName]) {
+					err := r.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
+					if err == nil {
+						continue
+					}
+					switch err.(type) {
+					case *evictions.EvictionNodeLimitError:
 						continue loop
+					case *evictions.EvictionTotalLimitError:
+						return nil
+					default:
+						klog.Errorf("eviction failed: %v", err)
 					}
 				}
 			}
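The same control flow reads more easily outside the diff. Below is a condensed sketch of the eviction error handling, assuming the typed errors (evictions.EvictionNodeLimitError, evictions.EvictionTotalLimitError) and the Evictor().Evict call used above; the evictPods helper, its parameters, and the use of frameworktypes.Handle are illustrative rather than code from the repository.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

// evictPods walks candidate pods node by node and reacts to the typed errors
// returned by Evict instead of polling NodeLimitExceeded after the fact.
func evictPods(ctx context.Context, handle frameworktypes.Handle, podsPerNode map[*v1.Node][]*v1.Pod, strategy string) {
loop:
	for _, pods := range podsPerNode {
		for _, pod := range pods {
			err := handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: strategy})
			if err == nil {
				continue
			}
			switch err.(type) {
			case *evictions.EvictionNodeLimitError:
				// This node's eviction limit is reached: skip to the next node.
				continue loop
			case *evictions.EvictionTotalLimitError:
				// The global eviction budget is spent: stop evicting entirely.
				return
			default:
				klog.Errorf("eviction failed: %v", err)
			}
		}
	}
}

Returning typed errors lets each strategy decide locally whether a limit is per-node (skip that node) or global (stop the whole balance pass), which is what the RemoveDuplicates change above does.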

@@ -20,21 +20,15 @@ import (
 	"context"
 	"testing"
 
-	"k8s.io/client-go/tools/events"
-	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
-	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
-	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	v1 "k8s.io/api/core/v1"
-	policyv1 "k8s.io/api/policy/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
-	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
-	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
+	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/test"
 )
@@ -300,55 +294,9 @@ func TestFindDuplicatePods(t *testing.T) {
 			}
 			fakeClient := fake.NewSimpleClientset(objs...)
-			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-			podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
-			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: testCase.nodefit}, nil)
 			if err != nil {
-				t.Errorf("Build get pods assigned to node function error: %v", err)
+				t.Fatalf("Unable to initialize a framework handle: %v", err)
 			}
-			sharedInformerFactory.Start(ctx.Done())
-			sharedInformerFactory.WaitForCacheSync(ctx.Done())
-			eventRecorder := &events.FakeRecorder{}
-			podEvictor := evictions.NewPodEvictor(
-				fakeClient,
-				"v1",
-				false,
-				nil,
-				nil,
-				testCase.nodes,
-				false,
-				eventRecorder,
-			)
-			nodeFit := testCase.nodefit
-			defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
-				EvictLocalStoragePods: false,
-				EvictSystemCriticalPods: false,
-				IgnorePvcPods: false,
-				EvictFailedBarePods: false,
-				NodeFit: nodeFit,
-			}
-			evictorFilter, _ := defaultevictor.New(
-				defaultEvictorFilterArgs,
-				&frameworkfake.HandleImpl{
-					ClientsetImpl: fakeClient,
-					GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
-					SharedInformerFactoryImpl: sharedInformerFactory,
-				},
-			)
-			handle := &frameworkfake.HandleImpl{
-				ClientsetImpl: fakeClient,
-				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
-				PodEvictorImpl: podEvictor,
-				EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
-				SharedInformerFactoryImpl: sharedInformerFactory,
-			}
 			plugin, err := New(&RemoveDuplicatesArgs{
@@ -749,55 +697,9 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 			}
 			fakeClient := fake.NewSimpleClientset(objs...)
-			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-			podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
-			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{}, nil)
 			if err != nil {
-				t.Errorf("Build get pods assigned to node function error: %v", err)
+				t.Fatalf("Unable to initialize a framework handle: %v", err)
 			}
-			sharedInformerFactory.Start(ctx.Done())
-			sharedInformerFactory.WaitForCacheSync(ctx.Done())
-			eventRecorder := &events.FakeRecorder{}
-			podEvictor := evictions.NewPodEvictor(
-				fakeClient,
-				policyv1.SchemeGroupVersion.String(),
-				false,
-				nil,
-				nil,
-				testCase.nodes,
-				false,
-				eventRecorder,
-			)
-			defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
-				EvictLocalStoragePods: false,
-				EvictSystemCriticalPods: false,
-				IgnorePvcPods: false,
-				EvictFailedBarePods: false,
-			}
-			evictorFilter, err := defaultevictor.New(
-				defaultEvictorFilterArgs,
-				&frameworkfake.HandleImpl{
-					ClientsetImpl: fakeClient,
-					GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
-					SharedInformerFactoryImpl: sharedInformerFactory,
-				},
-			)
-			if err != nil {
-				t.Fatalf("Unable to initialize the plugin: %v", err)
-			}
-			handle := &frameworkfake.HandleImpl{
-				ClientsetImpl: fakeClient,
-				GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
-				PodEvictorImpl: podEvictor,
-				EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
-				SharedInformerFactoryImpl: sharedInformerFactory,
-			}
 			plugin, err := New(&RemoveDuplicatesArgs{},

Some files were not shown because too many files have changed in this diff.