Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)

Compare commits: 376 commits, release-1. ... f164943257
.github/PULL_REQUEST_TEMPLATE.md (new file, 22 lines)
@@ -0,0 +1,22 @@
## Description

<!-- Please include a summary of the change and which issue is fixed -->

## Checklist

Please ensure your pull request meets the following criteria before submitting
for review; these items will be used by reviewers to assess the quality and
completeness of your changes:

- [ ] **Code Readability**: Is the code easy to understand, well-structured, and consistent with project conventions?
- [ ] **Naming Conventions**: Are variable, function, and struct names descriptive and consistent?
- [ ] **Code Duplication**: Is there any repeated code that should be refactored?
- [ ] **Function/Method Size**: Are functions/methods short and focused on a single task?
- [ ] **Comments & Documentation**: Are comments clear, useful, and not excessive? Were comments updated where necessary?
- [ ] **Error Handling**: Are errors handled appropriately?
- [ ] **Testing**: Are there sufficient unit/integration tests?
- [ ] **Performance**: Are there any obvious performance issues or unnecessary computations?
- [ ] **Dependencies**: Are new dependencies justified?
- [ ] **Logging & Monitoring**: Is logging used appropriately (not too verbose, not too silent)?
- [ ] **Backward Compatibility**: Does this change break any existing functionality or APIs?
- [ ] **Resource Management**: Are resources (files, connections, memory) managed and released properly?
- [ ] **PR Description**: Is the PR description clear, providing enough context and explaining the motivation for the change?
- [ ] **Documentation & Changelog**: Are README and docs updated if necessary?
.github/workflows/manifests.yaml (6 changed lines)
@@ -7,11 +7,11 @@ jobs:
deploy:
strategy:
matrix:
k8s-version: ["v1.33.0"]
descheduler-version: ["v0.33.0"]
k8s-version: ["v1.34.0"]
descheduler-version: ["v0.34.0"]
descheduler-api: ["v1alpha2"]
manifest: ["deployment"]
kind-version: ["v0.27.0"] # keep in sync with test/run-e2e-tests.sh
kind-version: ["v0.30.0"] # keep in sync with test/run-e2e-tests.sh
runs-on: ubuntu-latest
steps:
- name: Checkout Repo
CONTRIBUTING-descheduler.md (new file, 30 lines)
@@ -0,0 +1,30 @@
# Descheduler Design Constraints

This is a slowly growing document that lists good practices, conventions, and design decisions.

## Overview

TBD

## Code convention

* *formatting code*: run `make fmt` before committing each change to avoid CI failures

## Unit Test Conventions

These are the known conventions that are useful to practice whenever reasonable:

* *single pod creation*: each pod variable built using `test.BuildTestPod` is updated only through the `apply` argument of `BuildTestPod`
* *single node creation*: each node variable built using `test.BuildTestNode` is updated only through the `apply` argument of `BuildTestNode`
* *no object instance sharing*: each object built through `test.BuildXXX` functions is newly created in each unit test to avoid accidental object mutations
* *no object instance duplication*: avoid duplication by not creating two objects with the same passed values in two different places, e.g. two nodes created with the same memory, CPU, and pod requests. Rather, create a single function wrapping `test.BuildTestNode` and invoke this wrapper multiple times.

The aim is to reduce cognitive load when reading and debugging the test code.

## Design Decisions FAQ

This section documents common questions about design decisions in the descheduler codebase and the rationale behind them.

### Why doesn't the framework provide helpers for registering and retrieving indexers for plugins?

In general, each plugin can have many indexers (for example, for nodes, namespaces, pods, and other resources). Each plugin, depending on its internal optimizations, may choose a different indexing function. Indexers are currently used very rarely in the framework and default plugins. Therefore, extending the framework interface with additional helpers for registering and retrieving indexers might introduce an unnecessary and overly restrictive layer without first understanding how indexers will be used. For the moment, I suggest avoiding any restrictions on how many indexers can be registered or which ones can be registered. Instead, we should extend the framework handle to provide a unique ID for each profile, so that indexers within the same profile share a unique prefix. This avoids collisions when the same profile is instantiated more than once. Later, once we learn more about indexer usage, we can revisit whether it makes sense to impose additional restrictions.
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.24.2
FROM golang:1.24.6

WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
OWNERS (1 changed line)
@@ -4,6 +4,7 @@ approvers:
- seanmalloy
- a7i
- knelasevero
- ricardomaraschini
reviewers:
- damemi
- seanmalloy
README.md (125 changed lines)
@@ -33,11 +33,12 @@ but relies on the default scheduler for that.
## ⚠️ Documentation Versions by Release

If you are using a published release of Descheduler (such as
`registry.k8s.io/descheduler/descheduler:v0.33.0`), follow the documentation in
`registry.k8s.io/descheduler/descheduler:v0.34.0`), follow the documentation in
that version's release branch, as listed below:

|Descheduler Version|Docs link|
|---|---|
|v0.34.x|[`release-1.34`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.34/README.md)|
|v0.33.x|[`release-1.33`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.33/README.md)|
|v0.32.x|[`release-1.32`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.32/README.md)|
|v0.31.x|[`release-1.31`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.31/README.md)|

@@ -93,17 +94,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku

Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.34' | kubectl apply -f -
```

Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.34' | kubectl apply -f -
```

Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.34' | kubectl apply -f -
```

## User Guide

@@ -128,7 +129,7 @@ These are top level keys in the Descheduler Policy that you can use to configure
| `metricsCollector.enabled` | `bool` | `false` | Enables Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) collection. |
| `metricsProviders` | `[]object` | `nil` | Enables various metrics providers like Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) |
| `evictionFailureEventNotification` | `bool` | `false` | Enables eviction failure event notification. |
| `gracePeriodSeconds` | `int` | `0` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. |
| `gracePeriodSeconds` | `int` | `nil` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. |
| `prometheus` |`object`| `nil` | Configures collection of Prometheus metrics for actual resource utilization |
| `prometheus.url` |`string`| `nil` | Points to a Prometheus server url |
| `prometheus.authToken` |`object`| `nil` | Sets Prometheus server authentication token. If not specified, the in-cluster authentication token from the container's file system is read. |
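To make the top-level keys above concrete, here is a minimal sketch of a `DeschedulerPolicy` that sets them. It assumes the dotted names in the table map directly onto nested YAML fields; the Prometheus URL is a placeholder, so check the release's example policies for the authoritative layout.

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
# Top-level keys from the table above (values are illustrative)
gracePeriodSeconds: 60                  # per-eviction grace period; nil falls back to the type default
evictionFailureEventNotification: true  # emit events when an eviction fails
metricsCollector:
  enabled: true                         # collect from the Kubernetes Metrics Server
prometheus:
  url: "http://prometheus-server.monitoring.svc"  # placeholder in-cluster URL
profiles: []                            # strategy profiles omitted in this sketch
```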
@@ -148,20 +149,70 @@ In general, each plugin can consume metrics from a different provider so multipl

The Default Evictor Plugin is used by default for filtering pods before processing them in a strategy plugin, or for applying a PreEvictionFilter of pods before eviction. You can also create your own Evictor Plugin or use the Default one provided by Descheduler. Other uses for the Evictor plugin can be to sort, filter, validate or group pods by different criteria, and that's why this is handled by a plugin and not configured in the top-level config.

| Name |type| Default Value | Description |
|---|---|---|---|
| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed |
| `evictLocalStoragePods` |`bool`| `false` | allows eviction of pods with local storage |
| `evictDaemonSetPods` | bool | false | allows eviction of DaemonSet managed Pods. |
| `evictSystemCriticalPods` |`bool`| `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` |`bool`| `false` | set whether PVC pods should be evicted or ignored |
| `evictFailedBarePods` |`bool`| `false` | allow eviction of pods without owner references and in failed phase |
| `labelSelector` |`metav1.LabelSelector`|| (see [label filtering](#label-filtering)) |
| `priorityThreshold` |`priorityThreshold`|| (see [priority filtering](#priority-filtering)) |
| `nodeFit` |`bool`|`false`| (see [node fit filtering](#node-fit-filtering)) |
| `minReplicas` |`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
| `minPodAge` |`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold |
| `ignorePodsWithoutPDB` |`bool`|`false`| set whether pods without PodDisruptionBudget should be evicted or ignored |
| Name | Type | Default Value | Description |
|---|---|---|---|
| `nodeSelector` | `string` | `nil` | Limits the nodes that are processed. |
| `evictLocalStoragePods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithLocalStorage"` instead]**<br>Allows eviction of pods using local storage. |
| `evictDaemonSetPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"DaemonSetPods"` instead]**<br>Allows eviction of DaemonSet managed Pods. |
| `evictSystemCriticalPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"SystemCriticalPods"` instead]**<br>[Warning: Will evict Kubernetes system pods] Allows eviction of pods with any priority, including system-critical pods like kube-dns. |
| `ignorePvcPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithPVC"` instead]**<br>Sets whether PVC pods should be evicted or ignored. |
| `evictFailedBarePods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"FailedBarePods"` instead]**<br>Allows eviction of pods without owner references and in a failed phase. |
| `ignorePodsWithoutPDB` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithoutPDB"` instead]**<br>Sets whether pods without PodDisruptionBudget should be evicted or ignored. |
| `labelSelector` | `metav1.LabelSelector` | | (See [label filtering](#label-filtering)) |
| `priorityThreshold` | `priorityThreshold` | | (See [priority filtering](#priority-filtering)) |
| `nodeFit` | `bool` | `false` | (See [node fit filtering](#node-fit-filtering)) |
| `minReplicas` | `uint` | `0` | Ignores eviction of pods where the owner (e.g., `ReplicaSet`) replicas are below this threshold. |
| `minPodAge` | `metav1.Duration` | `0` | Ignores eviction of pods with a creation time within this threshold. |
| `noEvictionPolicy` | `enum` | `` | Sets whether a `descheduler.alpha.kubernetes.io/prefer-no-eviction` pod annotation is considered preferred or mandatory. Accepted values: "", "Preferred", "Mandatory". Defaults to "Preferred". |
| `podProtections` | `PodProtections` | `{}` | Holds the list of enabled and disabled pod protection policies.<br>Users can selectively disable certain default protection rules or enable extra ones. See below for supported values. |

#### Supported Values for `podProtections.DefaultDisabled`

> Setting a value in `defaultDisabled` **disables the corresponding default protection rule**. This means the specified type of Pods will **no longer be protected** from eviction and may be evicted if they meet other criteria.

| Value | Meaning |
|---|---|
| `"PodsWithLocalStorage"` | Allow eviction of Pods using local storage. |
| `"DaemonSetPods"` | Allow eviction of DaemonSet-managed Pods. |
| `"SystemCriticalPods"` | Allow eviction of system-critical Pods. |
| `"FailedBarePods"` | Allow eviction of failed bare Pods (without controllers). |

---

#### Supported Values for `podProtections.ExtraEnabled`

> Setting a value in `extraEnabled` **enables an additional protection rule**. This means the specified type of Pods will be **protected** from eviction.

| Value | Meaning |
|---|---|
| `"PodsWithPVC"` | Prevents eviction of Pods using Persistent Volume Claims (PVCs). |
| `"PodsWithoutPDB"` | Prevents eviction of Pods without a PodDisruptionBudget (PDB). |
| `"PodsWithResourceClaims"` | Prevents eviction of Pods using ResourceClaims. |

#### Protecting pods using specific Storage Classes

With the `PodsWithPVC` protection enabled, all pods using PVCs are protected from eviction by default; if needed, you can restrict the protection by filtering on PVC storage class. When filtering by storage class, only pods using PVCs with the specified storage classes are protected from eviction. For example:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "DefaultEvictor"
        args:
          podProtections:
            extraEnabled:
              - PodsWithPVC
            config:
              PodsWithPVC:
                protectedStorageClasses:
                  - name: storage-class-0
                  - name: storage-class-1
```
This example will protect pods using PVCs with storage classes `storage-class-0` and `storage-class-1` from eviction.

### Example policy

@@ -193,9 +244,17 @@ profiles:
pluginConfig:
- name: "DefaultEvictor"
args:
evictSystemCriticalPods: true
evictFailedBarePods: true
evictLocalStoragePods: true
podProtections:
defaultDisabled:
#- "PodsWithLocalStorage"
#- "SystemCriticalPods"
#- "DaemonSetPods"
#- "FailedBarePods"
extraEnabled:
#- "PodsWithPVC"
#- "PodsWithoutPDB"
#- "PodsWithResourceClaims"
config: {}
nodeFit: true
minReplicas: 2
plugins:

@@ -727,7 +786,9 @@
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.

You can also specify the `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Unknown`
> The primary purpose for using states like `Succeeded` and `Failed` is releasing resources so that new pods can be rescheduled.
> I.e., the main motivation is not to clean up pods but to release resources.
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Succeeded`, `Failed`, `Unknown`
- [Pod Reason](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions) reasons of: `NodeAffinity`, `NodeLost`, `Shutdown`, `UnexpectedAdmissionError`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`, `ImagePullBackOff`, `CrashLoopBackOff`, `CreateContainerConfigError`, `ErrImagePull`, `ImagePullBackOff`, `CreateContainerError`, `InvalidImageName`
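As an illustration of the `states` parameter described above, here is a sketch of a `PodLifeTime` configuration. The field names (`maxPodLifeTimeSeconds`, `states`) come from this README; the surrounding profile layout and the chosen values are assumptions.

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "PodLifeTime"
        args:
          maxPodLifeTimeSeconds: 86400   # evict pods older than one day
          states:
            - "Pending"                  # pod phase from the list above
            - "ImagePullBackOff"         # container-state-waiting reason from the list above
    plugins:
      deschedule:
        enabled:
          - "PodLifeTime"
```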
@@ -1011,12 +1072,16 @@ never evicted because these pods won't be recreated. (Standalone pods in failed
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have the same priority,
best effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
* All types of pods with the `descheduler.alpha.kubernetes.io/evict` annotation are eligible for eviction. This
annotation is used to override checks which prevent eviction, and users can select which pod is evicted.
Users should know how and if the pod will be recreated.
The annotation only affects internal descheduler checks.
The anti-disruption protection provided by the [/eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/)
subresource is still respected.
* Pods with the `descheduler.alpha.kubernetes.io/prefer-no-eviction` annotation voice their preference not to be evicted.
Each plugin decides whether the annotation gets respected or not. When the `DefaultEvictor` plugin sets `noEvictionPolicy`
to `Mandatory`, all such pods are excluded from eviction. This needs to be used with caution as some plugins may enforce
various policies that are expected to be always met.
* Pods with a non-nil DeletionTimestamp are not evicted by default.

Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.
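For reference, here is a sketch of a `DefaultEvictor` configuration that treats the `prefer-no-eviction` annotation as mandatory; the `noEvictionPolicy` field is taken from the table earlier in this README, while the surrounding profile layout is illustrative.

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "DefaultEvictor"
        args:
          # Pods annotated with descheduler.alpha.kubernetes.io/prefer-no-eviction
          # are excluded from eviction entirely (instead of merely preferred).
          noEvictionPolicy: "Mandatory"
```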
@@ -1048,9 +1113,12 @@ To get best results from HA mode some additional configurations might require:
| name | type | description |
|---|---|---|
| build_info | gauge | constant 1 |
| pods_evicted | CounterVec | total number of pods evicted |
| descheduler_loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (support _bucket, _sum, _count) |
| descheduler_strategy_duration_seconds | HistogramVec | time taken to complete each strategy of the descheduling operation (support _bucket, _sum, _count) |
| pods_evicted | CounterVec | total number of pods evicted; deprecated in version v0.34.0 |
| pods_evicted_total | CounterVec | total number of pods evicted |
| descheduler_loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (support _bucket, _sum, _count); deprecated in version v0.34.0 |
| loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (support _bucket, _sum, _count) |
| descheduler_strategy_duration_seconds | HistogramVec | time taken to complete each strategy of the descheduling operation (support _bucket, _sum, _count); deprecated in version v0.34.0 |
| strategy_duration_seconds | HistogramVec | time taken to complete each strategy of the descheduling operation (support _bucket, _sum, _count) |

The metrics are served through https://localhost:10258/metrics by default.
The address and port can be changed by setting the `--binding-address` and `--secure-port` flags.
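A sketch of how the metrics address and port might be overridden on the descheduler container is shown below; the flag names match this section, while the manifest fragment, image tag, and chosen values are placeholders.

```yaml
# Deployment/CronJob container fragment (illustrative)
containers:
  - name: descheduler
    image: registry.k8s.io/descheduler/descheduler:v0.34.0
    command:
      - /bin/descheduler
    args:
      - --policy-config-file=/policy-dir/policy.yaml
      - --binding-address=0.0.0.0   # address the metrics/healthz endpoint binds to
      - --secure-port=10443         # moves the endpoint off the default 10258
```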
@@ -1066,6 +1134,7 @@ packages that it is compiled with.

| Descheduler | Supported Kubernetes Version |
|---|---|
| v0.34 | v1.34 |
| v0.33 | v1.33 |
| v0.32 | v1.32 |
| v0.31 | v1.31 |

@@ -1109,7 +1178,7 @@ that the only people who can get things done around here are the "maintainers".
We also would love to add more "official" maintainers, so show us what you can
do!

This repository uses the Kubernetes bots. See a full list of the commands [here][prow].
This repository uses the Kubernetes bots. See a full list of the commands [here](https://go.k8s.io/bot-commands).

### Communicating With Contributors
@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.33.0
appVersion: 0.33.0
version: 0.34.0
appVersion: 0.34.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes
@@ -70,6 +70,10 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `cronJobAnnotations` | Annotations to add to the descheduler CronJob | `{}` |
| `cronJobLabels` | Labels to add to the descheduler CronJob | `{}` |
| `jobAnnotations` | Annotations to add to the descheduler Job resources (created by CronJob) | `{}` |
| `jobLabels` | Labels to add to the descheduler Job resources (created by CronJob) | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
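A sketch of chart values exercising the new CronJob/Job metadata parameters listed above; the keys come from the table, while the annotation and label values are made up.

```yaml
# values.yaml fragment (illustrative values)
cronJobAnnotations:
  monitoring.example.com/scrape: "true"
cronJobLabels:
  team: platform
jobAnnotations:
  sidecar.istio.io/inject: "false"
jobLabels:
  job-type: maintenance
podAnnotations:
  prometheus.io/scrape: "true"
podLabels:
  app.kubernetes.io/component: descheduler
```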
@@ -10,5 +10,5 @@ data:
policy.yaml: |
apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
kind: "DeschedulerPolicy"
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
{{ tpl (toYaml .Values.deschedulerPolicy) . | trim | indent 4 }}
{{- end }}
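The change above from `toYaml` to `tpl (toYaml ...)` means Helm template expressions inside `deschedulerPolicy` values are now rendered when the ConfigMap is generated. A sketch of what that allows (the selector string is a made-up example):

```yaml
# values.yaml fragment (illustrative)
deschedulerPolicy:
  profiles:
    - name: default
      pluginConfig:
        - name: "DefaultEvictor"
          args:
            # "{{ .Release.Name }}" is expanded by tpl at render time
            nodeSelector: "descheduler.example.com/release={{ .Release.Name }}"
```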
@@ -4,8 +4,15 @@ kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
{{- if .Values.cronJobAnnotations }}
annotations:
{{- .Values.cronJobAnnotations | toYaml | nindent 4 }}
{{- end }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.cronJobLabels }}
{{- .Values.cronJobLabels | toYaml | nindent 4 }}
{{- end }}
spec:
schedule: {{ .Values.schedule | quote }}
{{- if .Values.suspend }}
@@ -25,10 +32,24 @@ spec:
timeZone: {{ .Values.timeZone }}
{{- end }}
jobTemplate:
{{- if or .Values.jobAnnotations .Values.jobLabels }}
metadata:
{{- if .Values.jobAnnotations }}
annotations:
{{- .Values.jobAnnotations | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.jobLabels }}
labels:
{{- .Values.jobLabels | toYaml | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- if .Values.ttlSecondsAfterFinished }}
ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
{{- end }}
{{- if .Values.activeDeadlineSeconds }}
activeDeadlineSeconds: {{ .Values.activeDeadlineSeconds }}
{{- end }}
template:
metadata:
name: {{ template "descheduler.fullname" . }}
@@ -67,6 +88,9 @@ spec:
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}
restartPolicy: "Never"
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
@@ -100,6 +124,9 @@ spec:
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 16 }}
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 12 }}
@@ -108,4 +135,7 @@ spec:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumes | nindent 10 }}
{{- end }}
{{- end }}
@@ -6,6 +6,9 @@ metadata:
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.annotations }}
annotations: {{- toYaml .Values.deploymentAnnotations | nindent 4 }}
{{- end }}
spec:
{{- if gt (.Values.replicas | int) 1 }}
{{- if not .Values.leaderElection.enabled }}
@@ -39,6 +42,9 @@ spec:
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 6 }}
@@ -75,6 +81,9 @@ spec:
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
@@ -83,6 +92,9 @@ spec:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumes | nindent 8}}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
@@ -1,6 +1,9 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
{{- if kindIs "bool" .Values.serviceAccount.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
{{- end }}
metadata:
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ include "descheduler.namespace" . }}
charts/descheduler/tests/cronjob_annotations_test.yaml (new file, 109 lines)
@@ -0,0 +1,109 @@
suite: Test Descheduler CronJob and Job Annotations and Labels

templates:
- "*.yaml"

release:
name: descheduler

set:
kind: CronJob

tests:
- it: adds cronJob and job annotations and labels when set
template: templates/cronjob.yaml
set:
cronJobAnnotations:
monitoring.company.com/scrape: "true"
description: "test cronjob"
cronJobLabels:
environment: "test"
team: "platform"
jobAnnotations:
sidecar.istio.io/inject: "false"
job.company.com/retry-limit: "3"
jobLabels:
job-type: "maintenance"
priority: "high"
asserts:
- equal:
path: metadata.annotations["monitoring.company.com/scrape"]
value: "true"
- equal:
path: metadata.annotations.description
value: "test cronjob"
- equal:
path: metadata.labels.environment
value: "test"
- equal:
path: metadata.labels.team
value: "platform"
- equal:
path: spec.jobTemplate.metadata.annotations["sidecar.istio.io/inject"]
value: "false"
- equal:
path: spec.jobTemplate.metadata.annotations["job.company.com/retry-limit"]
value: "3"
- equal:
path: spec.jobTemplate.metadata.labels.job-type
value: "maintenance"
- equal:
path: spec.jobTemplate.metadata.labels.priority
value: "high"

- it: does not add cronJob and job metadata when not set
template: templates/cronjob.yaml
asserts:
- isNull:
path: metadata.annotations
- isNotNull:
path: metadata.labels
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: descheduler
- isNull:
path: spec.jobTemplate.metadata

- it: does not add job metadata when job annotations and labels are empty
template: templates/cronjob.yaml
set:
jobAnnotations: {}
jobLabels: {}
asserts:
- isNull:
path: spec.jobTemplate.metadata

- it: works with all annotation and label types together
template: templates/cronjob.yaml
set:
cronJobAnnotations:
cron-annotation: "cron-value"
cronJobLabels:
cron-label: "cron-value"
jobAnnotations:
job-annotation: "job-value"
jobLabels:
job-label: "job-value"
podAnnotations:
pod-annotation: "pod-value"
podLabels:
pod-label: "pod-value"
asserts:
- equal:
path: metadata.annotations.cron-annotation
value: "cron-value"
- equal:
path: metadata.labels.cron-label
value: "cron-value"
- equal:
path: spec.jobTemplate.metadata.annotations.job-annotation
value: "job-value"
- equal:
path: spec.jobTemplate.metadata.labels.job-label
value: "job-value"
- equal:
path: spec.jobTemplate.spec.template.metadata.annotations.pod-annotation
value: "pod-value"
- equal:
path: spec.jobTemplate.spec.template.metadata.labels.pod-label
value: "pod-value"
@@ -55,7 +55,8 @@ suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished 600
# ttlSecondsAfterFinished: 600
# activeDeadlineSeconds: 60 # Make sure this value is SHORTER than the cron interval.
# timeZone: Etc/UTC

# Required when running as a Deployment
@@ -107,8 +108,11 @@ deschedulerPolicy:
pluginConfig:
- name: DefaultEvictor
args:
ignorePvcPods: true
evictLocalStoragePods: true
podProtections:
defaultDisabled:
- "PodsWithLocalStorage"
extraEnabled:
- "PodsWithPVC"
- name: RemoveDuplicates
- name: RemovePodsHavingTooManyRestarts
args:
@@ -193,6 +197,25 @@ serviceAccount:
name:
# Specifies custom annotations for the serviceAccount
annotations: {}
# Opt out of API credential automounting
#
# automountServiceAccountToken Default is not set
# automountServiceAccountToken: true

# Mount the ServiceAccountToken in the Pod of a CronJob or Deployment
# Default is not set - but only implied by the ServiceAccount
# automountServiceAccountToken: true

# Annotations that'll be applied to deployment
deploymentAnnotations: {}

cronJobAnnotations: {}

cronJobLabels: {}

jobAnnotations: {}

jobLabels: {}

podAnnotations: {}

@@ -206,8 +229,9 @@ livenessProbe:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
initialDelaySeconds: 5
periodSeconds: 20
timeoutSeconds: 5

service:
enabled: false
@@ -244,3 +268,30 @@ serviceMonitor:
# targetLabel: nodename
# replacement: $1
# action: replace

## Additional Volume mounts when automountServiceAccountToken is false
# extraServiceAccountVolumeMounts:
# - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
# name: kube-api-access
# readOnly: true

## Additional Volumes when automountServiceAccountToken is false
# extraServiceAccountVolumes:
# - name: kube-api-access
# projected:
# defaultMode: 0444
# sources:
# - configMap:
# items:
# - key: ca.crt
# path: ca.crt
# name: kube-root-ca.crt
# - downwardAPI:
# items:
# - fieldRef:
# apiVersion: v1
# fieldPath: metadata.namespace
# path: namespace
# - serviceAccountToken:
# expirationSeconds: 3600
# path: token
@@ -144,8 +144,10 @@ func (rs *DeschedulerServer) Apply() error {
return err
}

secureServing.DisableHTTP2 = !rs.EnableHTTP2
rs.SecureServingInfo = secureServing
if secureServing != nil {
secureServing.DisableHTTP2 = !rs.EnableHTTP2
rs.SecureServingInfo = secureServing
}

return nil
}
@@ -22,6 +22,7 @@ import (
"io"
"os/signal"
"syscall"
"time"

"github.com/spf13/cobra"

@@ -97,17 +98,28 @@ func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {

healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))

stoppedCh, _, err := rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return err
var stoppedCh <-chan struct{}
var err error
if rs.SecureServingInfo != nil {
stoppedCh, _, err = rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return err
}
}

err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
if err != nil {
klog.ErrorS(err, "failed to create tracer provider")
}
defer tracing.Shutdown(ctx)
defer func() {
// we give the tracing.Shutdown() its own context as the
// original context may have been cancelled already. we
// have arbitrarily chosen the timeout duration.
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
tracing.Shutdown(ctx)
}()

// increase the fake watch channel so the dry-run mode can be run
// over a cluster with thousands of pods
@@ -118,8 +130,10 @@ func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
}

done()
// wait for metrics server to close
<-stoppedCh
if stoppedCh != nil {
// wait for metrics server to close
<-stoppedCh
}

return nil
}
@@ -4,6 +4,7 @@ Starting with descheduler release v0.10.0 container images are available in the

Descheduler Version | Container Image | Architectures |
--- | --- | --- |
v0.34.0 | registry.k8s.io/descheduler/descheduler:v0.34.0 | AMD64<br>ARM64<br>ARMv7 |
v0.33.0 | registry.k8s.io/descheduler/descheduler:v0.33.0 | AMD64<br>ARM64<br>ARMv7 |
v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
go.mod (113 changed lines)
@@ -1,57 +1,60 @@
|
||||
module sigs.k8s.io/descheduler
|
||||
|
||||
go 1.24.2
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.3
|
||||
|
||||
godebug default=go1.24
|
||||
|
||||
require (
|
||||
github.com/client9/misspell v0.3.4
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/common v0.62.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
go.opentelemetry.io/otel v1.33.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
|
||||
go.opentelemetry.io/otel/sdk v1.33.0
|
||||
go.opentelemetry.io/otel/trace v1.33.0
|
||||
google.golang.org/grpc v1.68.1
|
||||
k8s.io/api v0.33.0
|
||||
k8s.io/apimachinery v0.33.0
|
||||
k8s.io/apiserver v0.33.0
|
||||
k8s.io/client-go v0.33.0
|
||||
k8s.io/code-generator v0.33.0
|
||||
k8s.io/component-base v0.33.0
|
||||
k8s.io/component-helpers v0.33.0
|
||||
github.com/prometheus/common v0.64.0
|
||||
github.com/spf13/cobra v1.10.0
|
||||
github.com/spf13/pflag v1.0.9
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
google.golang.org/grpc v1.72.2
|
||||
k8s.io/api v0.34.0
|
||||
k8s.io/apimachinery v0.34.0
|
||||
k8s.io/apiserver v0.34.0
|
||||
k8s.io/client-go v0.34.0
|
||||
k8s.io/code-generator v0.34.0
|
||||
k8s.io/component-base v0.34.0
|
||||
k8s.io/component-helpers v0.34.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/metrics v0.33.0
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
|
||||
k8s.io/metrics v0.34.0
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
|
||||
kubevirt.io/api v1.3.0
|
||||
kubevirt.io/client-go v1.3.0
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
|
||||
sigs.k8s.io/mdtoc v1.1.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
sigs.k8s.io/yaml v1.6.0
|
||||
)
|
||||
|
||||
require golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.19.1 // indirect
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-kit/kit v0.13.0 // indirect
|
||||
github.com/go-kit/log v0.2.1 // indirect
|
||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||
@@ -66,13 +69,12 @@ require (
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.23.2 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/cel-go v0.26.0 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/jpillora/backoff v1.0.0 // indirect
|
||||
@@ -81,53 +83,56 @@ require (
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
|
||||
github.com/openshift/custom-resource-status v1.1.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.21 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.21 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.6.4 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.6.4 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.6.4 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.36.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.38.0 // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/net v0.38.0 // indirect
|
||||
golang.org/x/oauth2 v0.27.0 // indirect
|
||||
golang.org/x/sync v0.12.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/term v0.30.0 // indirect
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.14.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.30.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
|
||||
k8s.io/kms v0.33.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
|
||||
k8s.io/kms v0.34.0 // indirect
|
||||
k8s.io/kube-openapi v0.30.0 // indirect
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
)
|
||||
|
||||
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f
|
||||
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
|
||||
|
||||
go.sum (282 changed lines)
@@ -1,23 +1,20 @@
|
||||
cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4=
|
||||
cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
@@ -36,8 +33,8 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -49,9 +46,9 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
@@ -59,10 +56,10 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
|
||||
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
|
||||
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
|
||||
@@ -108,8 +105,8 @@ github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/K
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
|
||||
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
@@ -132,11 +129,10 @@ github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4=
|
||||
github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
|
||||
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
@@ -149,10 +145,9 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -163,20 +158,21 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
|
||||
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||
@@ -212,9 +208,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
|
||||
@@ -246,6 +242,7 @@ github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7
|
||||
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
|
||||
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
|
||||
github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
@@ -274,6 +271,7 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y
|
||||
github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4=
|
||||
github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
@@ -282,12 +280,14 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
|
||||
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
@@ -297,10 +297,12 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/cobra v1.10.0 h1:a5/WeUlSDCvV5a45ljW2ZFtV0bTDpkfSAj3uqB6Sc+0=
|
||||
github.com/spf13/cobra v1.10.0/go.mod h1:9dhySC7dnTtEiqzmqfkLj47BslqLCUPMXjG2lj/NgoE=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.8/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
|
||||
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
|
||||
@@ -317,6 +319,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
@@ -330,48 +333,53 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
|
||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8=
|
||||
go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs=
|
||||
go.etcd.io/etcd/client/v2 v2.305.21 h1:eLiFfexc2mE+pTLz9WwnoEsX5JTTpLCYVivKkmVXIRA=
|
||||
go.etcd.io/etcd/client/v2 v2.305.21/go.mod h1:OKkn4hlYNf43hpjEM3Ke3aRdUkhSl8xjKjSf8eCq2J8=
|
||||
go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY=
|
||||
go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.21 h1:jUItxeKyrDuVuWhdh0HtjUANwyuzcb7/FAeUfABmQsk=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.21/go.mod h1:wpZx8Egv1g4y+N7JAsqi2zoUiBIUWznLjqJbylDjWgU=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.21 h1:dOmE0mT55dIUsX77TKBLq+RgyumsQuYeiRQnW/ylugk=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.21/go.mod h1:fmcuY5R2SNkklU4+fKVBQi2biVp5vafMrWUEj4TJ4Cs=
|
||||
go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU=
|
||||
go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo=
|
||||
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
|
||||
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
|
||||
go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo=
|
||||
go.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.6.4 h1:9HBYrjppeOfFjBjaMTRxT3R7xT0GLK8EJMVC4xg6ok0=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI=
|
||||
go.etcd.io/etcd/client/v3 v3.6.4 h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A=
|
||||
go.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo=
|
||||
go.etcd.io/etcd/pkg/v3 v3.6.4 h1:fy8bmXIec1Q35/jRZ0KOes8vuFxbvdN0aAFqmEfJZWA=
|
||||
go.etcd.io/etcd/pkg/v3 v3.6.4/go.mod h1:kKcYWP8gHuBRcteyv6MXWSN0+bVMnfgqiHueIZnKMtE=
|
||||
go.etcd.io/etcd/server/v3 v3.6.4 h1:LsCA7CzjVt+8WGrdsnh6RhC0XqCsLkBly3ve5rTxMAU=
|
||||
go.etcd.io/etcd/server/v3 v3.6.4/go.mod h1:aYCL/h43yiONOv0QIR82kH/2xZ7m+IWYjzRmyQfnCAg=
|
||||
go.etcd.io/raft/v3 v3.6.0 h1:5NtvbDVYpnfZWcIHgGRk9DyzkBIXOi8j+DDp1IcnUWQ=
|
||||
go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
|
||||
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
|
||||
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
|
||||
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
|
||||
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
|
||||
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
|
||||
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
|
||||
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c=
|
||||
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
|
||||
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
@@ -387,8 +395,10 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
||||
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
|
||||
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
@@ -410,6 +420,7 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
|
||||
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -450,11 +461,13 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
|
||||
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
|
||||
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
|
||||
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -468,8 +481,9 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
|
||||
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -513,9 +527,12 @@ golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -534,8 +551,10 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
|
||||
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
|
||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
@@ -551,8 +570,10 @@ golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
|
||||
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -580,6 +601,9 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58
|
||||
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
|
||||
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
|
||||
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
|
||||
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -592,17 +616,15 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
|
||||
google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
|
||||
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
|
||||
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -614,11 +636,11 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -641,53 +663,50 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
|
||||
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
|
||||
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
|
||||
k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE=
|
||||
k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug=
|
||||
k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs=
|
||||
k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y=
|
||||
k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
|
||||
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||
k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc=
|
||||
k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8=
|
||||
k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98=
|
||||
k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg=
|
||||
k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0=
|
||||
k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
|
||||
k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg=
|
||||
k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ=
|
||||
k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo=
|
||||
k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY=
|
||||
k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
||||
k8s.io/code-generator v0.33.0 h1:B212FVl6EFqNmlgdOZYWNi77yBv+ed3QgQsMR8YQCw4=
|
||||
k8s.io/code-generator v0.33.0/go.mod h1:KnJRokGxjvbBQkSJkbVuBbu6z4B0rC7ynkpY5Aw6m9o=
|
||||
k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk=
|
||||
k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU=
|
||||
k8s.io/component-helpers v0.33.0 h1:0AdW0A0mIgljLgtG0hJDdJl52PPqTrtMgOgtm/9i/Ys=
|
||||
k8s.io/component-helpers v0.33.0/go.mod h1:9SRiXfLldPw9lEEuSsapMtvT8j/h1JyFFapbtybwKvU=
|
||||
k8s.io/code-generator v0.34.0 h1:Ze2i1QsvUprIlX3oHiGv09BFQRLCz+StA8qKwwFzees=
|
||||
k8s.io/code-generator v0.34.0/go.mod h1:Py2+4w2HXItL8CGhks8uI/wS3Y93wPKO/9mBQUYNua0=
|
||||
k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8=
|
||||
k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg=
|
||||
k8s.io/component-helpers v0.34.0 h1:5T7P9XGMoUy1JDNKzHf0p/upYbeUf8ZaSf9jbx0QlIo=
|
||||
k8s.io/component-helpers v0.34.0/go.mod h1:kaOyl5tdtnymriYcVZg4uwDBe2d1wlIpXyDkt6sVnt4=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
|
||||
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog=
|
||||
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.33.0 h1:fhQSW/vyaWDhMp0vDuO/sLg2RlGZf4F77beSXcB4/eE=
|
||||
k8s.io/kms v0.33.0/go.mod h1:C1I8mjFFBNzfUZXYt9FZVJ8MJl7ynFbGgZFbBzkBJ3E=
|
||||
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM=
|
||||
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro=
|
||||
k8s.io/metrics v0.33.0 h1:sKe5sC9qb1RakMhs8LWYNuN2ne6OTCWexj8Jos3rO2Y=
|
||||
k8s.io/metrics v0.33.0/go.mod h1:XewckTFXmE2AJiP7PT3EXaY7hi7bler3t2ZLyOdQYzU=
|
||||
k8s.io/kms v0.34.0 h1:u+/rcxQ3Jr7gC9AY5nXuEnBcGEB7ZOIJ9cdLdyHyEjQ=
|
||||
k8s.io/kms v0.34.0/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/metrics v0.34.0 h1:nYSfG2+tnL6/MRC2I+sGHjtNEGoEoM/KktgGOoQFwws=
|
||||
k8s.io/metrics v0.34.0/go.mod h1:KCuXmotE0v4AvoARKUP8NC4lUnbK/Du1mluGdor5h4M=
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
kubevirt.io/api v1.3.0 h1:9sGElMmnRU50pGED+MPPD2OwQl4S5lvjCUjm+t0mI90=
|
||||
kubevirt.io/api v1.3.0/go.mod h1:e6LkElYZZm8NcP2gKlFVHZS9pgNhIARHIjSBSfeiP1s=
|
||||
kubevirt.io/client-go v1.3.0 h1:/HKn4exzwsctEVTwVtEFaeT9D2v4TgWr2SmxITVEZ/4=
|
||||
@@ -700,18 +719,19 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUo
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
|
||||
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.2.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

@@ -35,6 +35,9 @@ rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["nodes", "pods"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "watch", "list"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1

@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.33.0
image: registry.k8s.io/descheduler/descheduler:v0.34.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume

@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.33.0
image: registry.k8s.io/descheduler/descheduler:v0.34.0
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"

@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.33.0
image: registry.k8s.io/descheduler/descheduler:v0.34.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume

@@ -31,10 +31,18 @@ const (

var (
PodsEvicted = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: DeschedulerSubsystem,
Name: "pods_evicted",
Help: "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
StabilityLevel: metrics.ALPHA,
DeprecatedVersion: "0.34.0",
}, []string{"result", "strategy", "profile", "namespace", "node"})
PodsEvictedTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: DeschedulerSubsystem,
Name: "pods_evicted",
Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
Name: "pods_evicted_total",
Help: "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
StabilityLevel: metrics.ALPHA,
}, []string{"result", "strategy", "profile", "namespace", "node"})

@@ -49,18 +57,36 @@ var (
)

DeschedulerLoopDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_loop_duration_seconds",
Help: "Time taken to complete a full descheduling cycle",
StabilityLevel: metrics.ALPHA,
DeprecatedVersion: "0.34.0",
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
}, []string{})
LoopDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_loop_duration_seconds",
Name: "loop_duration_seconds",
Help: "Time taken to complete a full descheduling cycle",
StabilityLevel: metrics.ALPHA,
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
}, []string{})

DeschedulerStrategyDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_strategy_duration_seconds",
Help: "Time taken to complete Each strategy of the descheduling operation",
StabilityLevel: metrics.ALPHA,
DeprecatedVersion: "0.34.0",
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
}, []string{"strategy", "profile"})
StrategyDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_strategy_duration_seconds",
Name: "strategy_duration_seconds",
Help: "Time taken to complete Each strategy of the descheduling operation",
StabilityLevel: metrics.ALPHA,
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},

@@ -68,9 +94,12 @@ var (

metricsList = []metrics.Registerable{
PodsEvicted,
PodsEvictedTotal,
buildInfo,
DeschedulerLoopDuration,
DeschedulerStrategyDuration,
LoopDuration,
StrategyDuration,
}
)
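Note on the hunk above: the existing collectors are kept but marked with DeprecatedVersion: "0.34.0", and renamed replacements are registered next to them (for example pods_evicted becomes pods_evicted_total under the descheduler subsystem). A minimal sketch of how a caller could record an eviction on both counters during the deprecation window follows; the function, label values, and import path are illustrative assumptions, not code taken from this repository.

// Sketch only (not repository code): record one eviction on both the deprecated
// and the renamed counter so existing dashboards keep working while they migrate.
package example

import "sigs.k8s.io/descheduler/metrics" // import path assumed from the module name

func recordEviction(result, strategy, profile, namespace, node string) {
	labels := map[string]string{
		"result":    result,
		"strategy":  strategy,
		"profile":   profile,
		"namespace": namespace,
		"node":      node,
	}
	metrics.PodsEvicted.With(labels).Inc()      // exposed as descheduler_pods_evicted (deprecated in 0.34.0)
	metrics.PodsEvictedTotal.With(labels).Inc() // exposed as descheduler_pods_evicted_total
}
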
2	pkg/api/v1alpha2/zz_generated.conversion.go (generated)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

2	pkg/api/v1alpha2/zz_generated.deepcopy.go (generated)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

2	pkg/api/v1alpha2/zz_generated.defaults.go (generated)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

2	pkg/api/zz_generated.deepcopy.go (generated)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
(The same Copyright 2025 → 2026 header hunk repeats, verbatim, in four more generated files.)
@@ -164,7 +164,7 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
v1.SchemeGroupVersion.WithResource("namespaces"), // Used by the defaultevictor plugin
schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"), // Used by the defaultevictor plugin
policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin

v1.SchemeGroupVersion.WithResource("persistentvolumeclaims"), // Used by the defaultevictor plugin
) // Used by the defaultevictor plugin

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
@@ -351,12 +351,13 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
defer span.End()
defer func(loopStartDuration time.Time) {
metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
metrics.LoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
}(time.Now())

// if len is still <= 1 error out
if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
return fmt.Errorf("the cluster size is 0 or 1")
klog.InfoS("Skipping descheduling cycle: requires >=2 nodes", "found", len(nodes))
return nil // gracefully skip this cycle instead of aborting
}

var client clientset.Interface
@@ -415,6 +416,7 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
var profileRunners []profileRunner
for _, profile := range d.deschedulerPolicy.Profiles {
currProfile, err := frameworkprofile.NewProfile(
ctx,
profile,
pluginregistry.PluginRegistry,
frameworkprofile.WithClientSet(client),
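A self-contained sketch of the two patterns changed above: timing the loop with a deferred Observe, and skipping (rather than failing) when fewer than two nodes are available. runLoop, nodeCount and observe are illustrative names, not descheduler API.

package example

import (
	"fmt"
	"time"
)

func runLoop(nodeCount int, observe func(seconds float64)) error {
	// Capture the start time at defer-creation so the elapsed time covers the
	// whole loop body, and record it exactly once on every return path.
	defer func(start time.Time) {
		observe(time.Since(start).Seconds())
	}(time.Now())

	if nodeCount <= 1 {
		// Evicting from a 0- or 1-node cluster would only disrupt workloads,
		// so the cycle is skipped instead of being treated as a hard error.
		fmt.Printf("skipping descheduling cycle: requires >=2 nodes, found %d\n", nodeCount)
		return nil
	}

	// ... run profiles against the eligible nodes ...
	return nil
}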
@@ -42,6 +42,12 @@ import (
"sigs.k8s.io/descheduler/pkg/tracing"
)

const (
deschedulerGlobalName = "sigs.k8s.io/descheduler"
reasonAnnotationKey = "reason"
requestedByAnnotationKey = "requested-by"
)

var (
assumedEvictionRequestTimeoutSeconds uint = 10 * 60 // 10 minutes
evictionRequestsCacheResyncPeriod time.Duration = 10 * time.Minute
@@ -482,6 +488,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
err := NewEvictionTotalLimitError()
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal)
@@ -496,6 +503,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
err := NewEvictionNodeLimitError(pod.Spec.NodeName)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
@@ -510,6 +518,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
err := NewEvictionNamespaceLimitError(pod.Namespace)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace, "pod", klog.KObj(pod))
@@ -519,13 +528,14 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
return err
}

ignore, err := pe.evictPod(ctx, pod)
ignore, err := pe.evictPod(ctx, pod, opts)
if err != nil {
// err is used only for logging purposes
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
metrics.PodsEvictedTotal.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
if pe.evictionFailureEventNotification {
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: %v", pod.Spec.NodeName, err.Error())
@@ -545,6 +555,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio

if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
metrics.PodsEvictedTotal.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}

if pe.dryRun {
@@ -564,7 +575,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
}

// return (ignore, err)
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) (bool, error) {
deleteOptions := &metav1.DeleteOptions{
GracePeriodSeconds: pe.gracePeriodSeconds,
}
@@ -577,6 +588,10 @@ func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Annotations: map[string]string{
"reason": fmt.Sprintf("triggered by %v/%v: %v", opts.ProfileName, opts.StrategyName, opts.Reason),
"requested-by": deschedulerGlobalName,
},
},
DeleteOptions: deleteOptions,
}
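For illustration, a hedged sketch of building a policy/v1 Eviction that carries the new reason and requested-by annotations and submitting it through client-go; evictWithContext and the 30-second grace period are assumptions, not the descheduler's actual implementation.

package example

import (
	"context"
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/utils/ptr"
)

func evictWithContext(ctx context.Context, client kubernetes.Interface, namespace, podName, profile, strategy, reason string) error {
	eviction := &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName,
			Namespace: namespace,
			// The annotations let admission webhooks and audit tooling see
			// who requested the eviction and why.
			Annotations: map[string]string{
				"reason":       fmt.Sprintf("triggered by %v/%v: %v", profile, strategy, reason),
				"requested-by": "sigs.k8s.io/descheduler",
			},
		},
		DeleteOptions: &metav1.DeleteOptions{GracePeriodSeconds: ptr.To[int64](30)},
	}
	// Submit the eviction through the policy/v1 eviction subresource.
	return client.PolicyV1().Evictions(namespace).Evict(ctx, eviction)
}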
@@ -20,6 +20,7 @@ import (
"context"
"fmt"
"reflect"
"strings"
"testing"
"time"

@@ -114,7 +115,7 @@ func TestEvictPod(t *testing.T) {
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
}

_, got := podEvictor.evictPod(ctx, test.evictedPod)
_, got := podEvictor.evictPod(ctx, test.evictedPod, EvictOptions{})
if got != test.wantErr {
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.evictedPod.Name, test.wantErr, got)
}
@@ -418,7 +419,11 @@ func TestEvictionRequestsCacheCleanup(t *testing.T) {
}
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
podName := eviction.GetName()
if podName == "p1" || podName == "p2" {
annotations := eviction.GetAnnotations()
if (podName == "p1" || podName == "p2") && annotations[requestedByAnnotationKey] == deschedulerGlobalName && strings.HasPrefix(
annotations[reasonAnnotationKey],
"triggered by",
) {
return true, nil, &apierrors.StatusError{
ErrStatus: metav1.Status{
Reason: metav1.StatusReasonTooManyRequests,
@@ -17,12 +17,17 @@ limitations under the License.
package utils

import (
corev1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
)

const (
EvictionKind = "Eviction"
EvictionSubresource = "pods/eviction"
// A new experimental feature for soft no-eviction preference.
// Each plugin will decide whether the soft preference will be respected.
// If configured the soft preference turns into a mandatory no-eviction policy for the DefaultEvictor plugin.
SoftNoEvictionAnnotationKey = "descheduler.alpha.kubernetes.io/prefer-no-eviction"
)

// SupportEviction uses Discovery API to find out if the server support eviction subresource
@@ -56,3 +61,9 @@ func SupportEviction(client clientset.Interface) (string, error) {
}
return "", nil
}

// HaveNoEvictionAnnotation checks if the pod have soft no-eviction annotation
func HaveNoEvictionAnnotation(pod *corev1.Pod) bool {
_, found := pod.ObjectMeta.Annotations[SoftNoEvictionAnnotationKey]
return found
}
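A small sketch of how a plugin might honor the soft no-eviction preference described above; respectsSoftPreference and honorPreference are hypothetical names, not descheduler API.

package example

import corev1 "k8s.io/api/core/v1"

const softNoEvictionAnnotationKey = "descheduler.alpha.kubernetes.io/prefer-no-eviction"

// respectsSoftPreference returns false when the pod asks not to be evicted and
// the calling plugin chooses to honor the (soft) preference.
func respectsSoftPreference(pod *corev1.Pod, honorPreference bool) bool {
	_, prefersNoEviction := pod.Annotations[softNoEvictionAnnotationKey]
	if prefersNoEviction && honorPreference {
		return false
	}
	return true
}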
@@ -105,20 +105,29 @@ func IsReady(node *v1.Node) bool {
return true
}

// NodeFit returns true if the provided pod can be scheduled onto the provided node.
// NodeFit returns nil if the provided pod can be scheduled onto the provided node.
// Otherwise, it returns an error explaining why the node does not fit the pod.
//
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
// This function currently considers a subset of the Kubernetes Scheduler's predicates when
// deciding if a pod would fit on a node, but more predicates may be added in the future.
// There should be no methods to modify nodes or pods in this method.
// It considers a subset of the Kubernetes Scheduler's predicates
// when deciding if a pod would fit on a node. More predicates may be added in the future.
//
// The checks are ordered from fastest to slowest to reduce unnecessary computation,
// especially for nodes that are clearly unsuitable early in the evaluation process.
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) error {
// Check node selector and required affinity
// Check if the node is marked as unschedulable.
if IsNodeUnschedulable(node) {
return errors.New("node is not schedulable")
}

// Check if the pod matches the node's label selector (nodeSelector) and required node affinity rules.
if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
return err
} else if !ok {
return errors.New("pod node selector does not match the node label")
}

// Check taints (we only care about NoSchedule and NoExecute taints)
// Check taints on the node that have effect NoSchedule or NoExecute.
ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
})
@@ -126,25 +135,21 @@ func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v
return errors.New("pod does not tolerate taints on the node")
}

// Check if the pod can fit on a node based off it's requests
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
return reqError
}
}

// Check if node is schedulable
if IsNodeUnschedulable(node) {
return errors.New("node is not schedulable")
}

// Check if pod matches inter-pod anti-affinity rule of pod on node
// Check if the pod violates any inter-pod anti-affinity rules with existing pods on the node.
// This involves iterating over all pods assigned to the node and evaluating label selectors.
if match, err := podMatchesInterPodAntiAffinity(nodeIndexer, pod, node); err != nil {
return err
} else if match {
return errors.New("pod matches inter-pod anti-affinity rule of other pod on node")
}

// Check whether the node has enough available resources to accommodate the pod.
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
return reqError
}
}

return nil
}

@@ -236,7 +241,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
return false, fmt.Errorf("insufficient %v", resource)
}
}
// check pod num, at least one pod number is avaibalbe
// check pod num, at least one pod number is available
if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 {
return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
}
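A minimal sketch of the cheapest-checks-first ordering the reworked NodeFit follows; the check type and firstFailure helper are illustrative stand-ins, not the real predicates.

package example

import "fmt"

type check struct {
	name string
	run  func() error // returns nil when the pod passes this predicate
}

// firstFailure evaluates checks in order and stops at the first failure, so
// expensive work (resource accounting, anti-affinity scans) only happens for
// nodes that already passed the cheap structural checks.
func firstFailure(checks []check) error {
	for _, c := range checks {
		if err := c.run(); err != nil {
			return fmt.Errorf("%s: %w", c.name, err)
		}
	}
	return nil
}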
@@ -25,9 +25,11 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/utils/ptr"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
@@ -78,9 +80,24 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
sharedInformerFactory.WaitForCacheSync(stopChannel)
defer close(stopChannel)

// First verify nodeLister returns non-empty list
allNodes, err := nodeLister.List(labels.Everything())
if err != nil {
t.Fatalf("Failed to list nodes from nodeLister: %v", err)
}
if len(allNodes) == 0 {
t.Fatal("Expected nodeLister to return non-empty list of nodes")
}
if len(allNodes) != 2 {
t.Errorf("Expected nodeLister to return 2 nodes, got %d", len(allNodes))
}

// Now test ReadyNodes
nodes, _ := ReadyNodes(ctx, fakeClient, nodeLister, nodeSelector)

if nodes[0].Name != "node1" {
if len(nodes) != 1 {
t.Errorf("Expected 1 node, got %d", len(nodes))
} else if nodes[0].Name != "node1" {
t.Errorf("Expected node1, got %s", nodes[0].Name)
}
}
@@ -1020,6 +1037,64 @@ func TestNodeFit(t *testing.T) {
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
},
|
||||
{
|
||||
description: "Pod with native sidecars with too much cpu does not fit on node",
|
||||
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
|
||||
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
|
||||
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: createResourceList(100000, 100*1000*1000, 0),
|
||||
},
|
||||
})
|
||||
}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
err: errors.New("insufficient cpu"),
|
||||
},
|
||||
{
|
||||
description: "Pod with native sidecars with too much memory does not fit on node",
|
||||
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
|
||||
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
|
||||
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: createResourceList(100, 1000*1000*1000*1000, 0),
|
||||
},
|
||||
})
|
||||
}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
err: errors.New("insufficient memory"),
|
||||
},
|
||||
{
|
||||
description: "Pod with small native sidecars fits on node",
|
||||
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
|
||||
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
|
||||
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: createResourceList(100, 100*1000*1000, 0),
|
||||
},
|
||||
})
|
||||
}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
},
|
||||
{
|
||||
description: "Pod with large overhead does not fit on node",
|
||||
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
|
||||
pod.Spec.Overhead = createResourceList(100000, 100*1000*1000, 0)
|
||||
}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
err: errors.New("insufficient cpu"),
|
||||
},
|
||||
{
|
||||
description: "Pod with small overhead fits on node",
|
||||
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
|
||||
pod.Spec.Overhead = createResourceList(1, 1*1000*1000, 0)
|
||||
}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"

evictionutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/utils"
)

@@ -254,14 +255,32 @@ func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
return false
}
if (pods[j].Spec.Priority == nil && pods[i].Spec.Priority == nil) || (*pods[i].Spec.Priority == *pods[j].Spec.Priority) {
if IsBestEffortPod(pods[i]) {
iIsBestEffortPod := IsBestEffortPod(pods[i])
jIsBestEffortPod := IsBestEffortPod(pods[j])
iIsBurstablePod := IsBurstablePod(pods[i])
jIsBurstablePod := IsBurstablePod(pods[j])
iIsGuaranteedPod := IsGuaranteedPod(pods[i])
jIsGuaranteedPod := IsGuaranteedPod(pods[j])
if (iIsBestEffortPod && jIsBestEffortPod) || (iIsBurstablePod && jIsBurstablePod) || (iIsGuaranteedPod && jIsGuaranteedPod) {
iHasNoEvictonPolicy := evictionutils.HaveNoEvictionAnnotation(pods[i])
jHasNoEvictonPolicy := evictionutils.HaveNoEvictionAnnotation(pods[j])
if !iHasNoEvictonPolicy {
return true
}
if !jHasNoEvictonPolicy {
return false
}
return true
}
if IsBurstablePod(pods[i]) && IsGuaranteedPod(pods[j]) {
if iIsBestEffortPod {
return true
}
if iIsBurstablePod && jIsGuaranteedPod {
return true
}
return false
}

return *pods[i].Spec.Priority < *pods[j].Spec.Priority
})
}
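A simplified sketch of the eviction ordering implemented above: lower priority first, then QoS class (BestEffort before Burstable before Guaranteed), and within the same class pods without the prefer-no-eviction annotation come first. The podInfo type is illustrative, not the descheduler's own representation.

package example

import "sort"

type podInfo struct {
	priority       int32
	qosRank        int // 0 = BestEffort, 1 = Burstable, 2 = Guaranteed
	prefersNoEvict bool
}

func sortForEviction(pods []podInfo) {
	sort.SliceStable(pods, func(i, j int) bool {
		if pods[i].priority != pods[j].priority {
			return pods[i].priority < pods[j].priority
		}
		if pods[i].qosRank != pods[j].qosRank {
			return pods[i].qosRank < pods[j].qosRank
		}
		// Same priority and QoS class: evict pods without the soft
		// no-eviction preference before those that set it.
		return !pods[i].prefersNoEvict && pods[j].prefersNoEvict
	})
}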
@@ -117,6 +117,14 @@ func TestListPodsOnANode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func getPodListNames(pods []*v1.Pod) []string {
|
||||
names := []string{}
|
||||
for _, pod := range pods {
|
||||
names = append(names, pod.Name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
|
||||
|
||||
@@ -149,11 +157,70 @@ func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
|
||||
p6 := test.BuildTestPod("p6", 400, 100, n1.Name, test.MakeGuaranteedPod)
|
||||
p6.Spec.Priority = nil
|
||||
|
||||
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
|
||||
p7 := test.BuildTestPod("p7", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, lowPriority)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// BestEffort
|
||||
p8 := test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeBestEffortPod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p9 := test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeBurstablePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// Guaranteed
|
||||
p10 := test.BuildTestPod("p10", 400, 100, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeGuaranteedPod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p11 := test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.MakeBurstablePod(pod)
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p12 := test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.MakeBurstablePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
podList := []*v1.Pod{p1, p8, p9, p10, p2, p3, p4, p5, p6, p7, p11, p12}
|
||||
// p5: no priority, best effort
|
||||
// p11: no priority, burstable
|
||||
// p6: no priority, guaranteed
|
||||
// p1: low priority
|
||||
// p7: low priority, prefer-no-eviction
|
||||
// p2: high priority, best effort
|
||||
// p8: high priority, best effort, prefer-no-eviction
|
||||
// p3: high priority, burstable
|
||||
// p9: high priority, burstable, prefer-no-eviction
|
||||
// p4: high priority, guaranteed
|
||||
// p10: high priority, guaranteed, prefer-no-eviction
|
||||
expectedPodList := []*v1.Pod{p5, p11, p12, p6, p1, p7, p2, p8, p3, p9, p4, p10}
|
||||
|
||||
SortPodsBasedOnPriorityLowToHigh(podList)
|
||||
if !reflect.DeepEqual(podList[len(podList)-1], p4) {
|
||||
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
|
||||
if !reflect.DeepEqual(getPodListNames(podList), getPodListNames(expectedPodList)) {
|
||||
t.Errorf("Pods were sorted in an unexpected order: %v, expected %v", getPodListNames(podList), getPodListNames(expectedPodList))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -108,6 +108,10 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
IgnorePvcPods: false,
EvictFailedBarePods: false,
IgnorePodsWithoutPDB: false,
PodProtections: defaultevictor.PodProtections{
DefaultDisabled: []defaultevictor.PodProtection{},
ExtraEnabled: []defaultevictor.PodProtection{},
},
},
}

@@ -172,13 +176,8 @@ func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginr
} else {
if prometheusConfig.Prometheus.URL == "" {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus URL is required when prometheus is enabled"))
} else {
u, err := url.Parse(prometheusConfig.Prometheus.URL)
if err != nil {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("error parsing prometheus URL: %v", err))
} else if u.Scheme != "https" {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus URL's scheme is not https, got %q instead", u.Scheme))
}
} else if _, err := url.Parse(prometheusConfig.Prometheus.URL); err != nil {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("error parsing prometheus URL: %v", err))
}

if prometheusConfig.Prometheus.AuthToken != nil {
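A sketch of the relaxed validation above: the Prometheus URL must be present and must parse, but an https scheme is no longer required. validatePrometheusURL is a hypothetical helper, not part of the policy package.

package example

import (
	"fmt"
	"net/url"
)

func validatePrometheusURL(raw string) error {
	if raw == "" {
		return fmt.Errorf("prometheus URL is required when prometheus is enabled")
	}
	if _, err := url.Parse(raw); err != nil {
		return fmt.Errorf("error parsing prometheus URL: %v", err)
	}
	// Note: after this change both http and https URLs are accepted.
	return nil
}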
@@ -22,6 +22,7 @@ import (
"testing"

"github.com/google/go-cmp/cmp"

"k8s.io/apimachinery/pkg/conversion"
fakeclientset "k8s.io/client-go/kubernetes/fake"
utilptr "k8s.io/utils/ptr"
@@ -259,20 +260,6 @@ func TestValidateDeschedulerConfiguration(t *testing.T) {
},
result: fmt.Errorf("error parsing prometheus URL: parse \"http://example.com:-80\": invalid port \":-80\" after host"),
},
{
description: "prometheus url does not have https error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{
Source: api.PrometheusMetrics,
Prometheus: &api.Prometheus{
URL: "http://example.com:80",
},
},
},
},
result: fmt.Errorf("prometheus URL's scheme is not https, got \"http\" instead"),
},
{
description: "prometheus authtoken with no secret reference error",
deschedulerPolicy: api.DeschedulerPolicy{
@@ -510,6 +497,313 @@ profiles:
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test DisabledDefaultPodProtections configuration",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "ProfileName",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
DefaultDisabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithLocalStorage,
|
||||
defaultevictor.DaemonSetPods,
|
||||
},
|
||||
},
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test podProtections extraEnabled configuration",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- "PodsWithoutPDB"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "ProfileName",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
ExtraEnabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithPVC,
|
||||
defaultevictor.PodsWithoutPDB,
|
||||
},
|
||||
},
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test both ExtraPodProtections and DisabledDefaultPodProtections configuration",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- "PodsWithoutPDB"
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "ProfileName",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
ExtraEnabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithPVC,
|
||||
defaultevictor.PodsWithoutPDB,
|
||||
},
|
||||
DefaultDisabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithLocalStorage,
|
||||
defaultevictor.DaemonSetPods,
|
||||
},
|
||||
},
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test error when using both Deprecated fields and DisabledDefaultPodProtections/ExtraPodProtections",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
evictSystemCriticalPods: true
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- "PodsWithoutPDB"
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
|
||||
},
|
||||
{
|
||||
description: "test error when Disables a default protection that does not exist",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "InvalidProtection"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in DefaultDisabled: \"InvalidProtection\". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]"),
|
||||
},
|
||||
{
|
||||
description: "test error when Enables an extra protection that does not exist",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "InvalidProtection"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in ExtraEnabled: \"InvalidProtection\". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]"),
|
||||
},
|
||||
{
|
||||
description: "test error when Disables an extra protection",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "PodsWithPVC"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in DefaultDisabled: \"PodsWithPVC\". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]"),
|
||||
},
|
||||
{
|
||||
description: "test error when Enables a default protection",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in ExtraEnabled: \"DaemonSetPods\". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@@ -517,14 +811,14 @@ profiles:
|
||||
result, err := decode("filename", tc.policy, client, pluginregistry.PluginRegistry)
|
||||
if err != nil {
|
||||
if tc.err == nil {
|
||||
t.Errorf("unexpected error: %s.", err.Error())
|
||||
} else {
|
||||
t.Errorf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
|
||||
t.Fatalf("unexpected error: %s.", err.Error())
|
||||
} else if err.Error() != tc.err.Error() {
|
||||
t.Fatalf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
|
||||
}
|
||||
}
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" && err == nil {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
if diff != "" {
|
||||
t.Fatalf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ type FakePlugin struct {
|
||||
}
|
||||
|
||||
func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
@@ -74,7 +74,7 @@ func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
@@ -165,7 +165,7 @@ type FakeDeschedulePlugin struct {
|
||||
}
|
||||
|
||||
func NewFakeDeschedulePluginFncFromFake(fp *FakeDeschedulePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeDeschedulePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeDeschedulePluginArgs, got %T", args)
|
||||
@@ -252,7 +252,7 @@ type FakeBalancePlugin struct {
|
||||
}
|
||||
|
||||
func NewFakeBalancePluginFncFromFake(fp *FakeBalancePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeBalancePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeBalancePluginArgs, got %T", args)
|
||||
@@ -339,7 +339,7 @@ type FakeFilterPlugin struct {
|
||||
}
|
||||
|
||||
func NewFakeFilterPluginFncFromFake(fp *FakeFilterPlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeFilterPluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeFilterPluginArgs, got %T", args)
|
||||
@@ -408,3 +408,55 @@ func (d *FakeFilterPlugin) handleBoolAction(action Action) bool {
|
||||
}
|
||||
panic(fmt.Errorf("unhandled %q action", action.GetExtensionPoint()))
|
||||
}
|
||||
|
||||
// RegisterFakePlugin registers a FakePlugin with the given registry
|
||||
func RegisterFakePlugin(name string, plugin *FakePlugin, registry pluginregistry.Registry) {
|
||||
pluginregistry.Register(
|
||||
name,
|
||||
NewPluginFncFromFake(plugin),
|
||||
&FakePlugin{},
|
||||
&FakePluginArgs{},
|
||||
ValidateFakePluginArgs,
|
||||
SetDefaults_FakePluginArgs,
|
||||
registry,
|
||||
)
|
||||
}
|
||||
|
||||
// RegisterFakeDeschedulePlugin registers a FakeDeschedulePlugin with the given registry
|
||||
func RegisterFakeDeschedulePlugin(name string, plugin *FakeDeschedulePlugin, registry pluginregistry.Registry) {
|
||||
pluginregistry.Register(
|
||||
name,
|
||||
NewFakeDeschedulePluginFncFromFake(plugin),
|
||||
&FakeDeschedulePlugin{},
|
||||
&FakeDeschedulePluginArgs{},
|
||||
ValidateFakePluginArgs,
|
||||
SetDefaults_FakePluginArgs,
|
||||
registry,
|
||||
)
|
||||
}
|
||||
|
||||
// RegisterFakeBalancePlugin registers a FakeBalancePlugin with the given registry
|
||||
func RegisterFakeBalancePlugin(name string, plugin *FakeBalancePlugin, registry pluginregistry.Registry) {
|
||||
pluginregistry.Register(
|
||||
name,
|
||||
NewFakeBalancePluginFncFromFake(plugin),
|
||||
&FakeBalancePlugin{},
|
||||
&FakeBalancePluginArgs{},
|
||||
ValidateFakePluginArgs,
|
||||
SetDefaults_FakePluginArgs,
|
||||
registry,
|
||||
)
|
||||
}
|
||||
|
||||
// RegisterFakeFilterPlugin registers a FakeFilterPlugin with the given registry
|
||||
func RegisterFakeFilterPlugin(name string, plugin *FakeFilterPlugin, registry pluginregistry.Registry) {
|
||||
pluginregistry.Register(
|
||||
name,
|
||||
NewFakeFilterPluginFncFromFake(plugin),
|
||||
&FakeFilterPlugin{},
|
||||
&FakeFilterPluginArgs{},
|
||||
ValidateFakePluginArgs,
|
||||
SetDefaults_FakePluginArgs,
|
||||
registry,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -17,6 +17,8 @@ limitations under the License.
package pluginregistry

import (
"context"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
@@ -35,7 +37,7 @@ type PluginUtilities struct {
PluginArgDefaulter PluginArgDefaulter
}

type PluginBuilder = func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error)
type PluginBuilder = func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error)

type (
PluginArgValidator = func(args runtime.Object) error
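A sketch of a plugin constructor matching the new context-aware PluginBuilder signature, deriving its logger from the context; examplePlugin is illustrative, and it assumes the framework's Plugin interface only requires Name().

package example

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"

	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

type examplePlugin struct {
	logger klog.Logger
	handle frameworktypes.Handle
}

func (p *examplePlugin) Name() string { return "Example" }

// New accepts a context so the plugin can derive a logger (and honor
// cancellation) instead of relying on package-level klog calls.
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
	if args == nil {
		return nil, fmt.Errorf("expected plugin args, got nil")
	}
	return &examplePlugin{
		logger: klog.FromContext(ctx).WithValues("plugin", "Example"),
		handle: handle,
	}, nil
}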
94
pkg/framework/plugins/defaultevictor/constraints.go
Normal file
94
pkg/framework/plugins/defaultevictor/constraints.go
Normal file
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaultevictor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
func evictionConstraintsForLabelSelector(logger klog.Logger, labelSelector *metav1.LabelSelector) ([]constraint, error) {
|
||||
if labelSelector != nil {
|
||||
selector, err := metav1.LabelSelectorAsSelector(labelSelector)
|
||||
if err != nil {
|
||||
logger.Error(err, "could not get selector from label selector")
|
||||
return nil, err
|
||||
}
|
||||
if !selector.Empty() {
|
||||
return []constraint{
|
||||
func(pod *v1.Pod) error {
|
||||
if !selector.Matches(labels.Set(pod.Labels)) {
|
||||
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func evictionConstraintsForMinReplicas(logger klog.Logger, minReplicas uint, handle frameworktypes.Handle) ([]constraint, error) {
|
||||
if minReplicas > 1 {
|
||||
indexName := "metadata.ownerReferences"
|
||||
indexer, err := getPodIndexerByOwnerRefs(indexName, handle)
|
||||
if err != nil {
|
||||
logger.Error(err, "could not get pod indexer by ownerRefs")
|
||||
return nil, err
|
||||
}
|
||||
return []constraint{
|
||||
func(pod *v1.Pod) error {
|
||||
if len(pod.OwnerReferences) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(pod.OwnerReferences) > 1 {
|
||||
logger.V(5).Info("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
|
||||
return nil
|
||||
}
|
||||
ownerRef := pod.OwnerReferences[0]
|
||||
objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods for minReplicas filter in the policy parameter")
|
||||
}
|
||||
if uint(len(objs)) < minReplicas {
|
||||
return fmt.Errorf("owner has %d replicas which is less than minReplicas of %d", len(objs), minReplicas)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}, nil
|
||||
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func evictionConstraintsForMinPodAge(minPodAge *metav1.Duration) []constraint {
|
||||
if minPodAge != nil {
|
||||
return []constraint{
|
||||
func(pod *v1.Pod) error {
|
||||
if pod.Status.StartTime == nil || time.Since(pod.Status.StartTime.Time) < minPodAge.Duration {
|
||||
return fmt.Errorf("pod age is not older than MinPodAge: %s seconds", minPodAge.String())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -14,19 +14,20 @@ limitations under the License.
|
||||
package defaultevictor
|
||||
|
||||
import (
|
||||
// "context"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
"maps"
|
||||
"slices"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
evictionutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
@@ -48,6 +49,7 @@ type constraint func(pod *v1.Pod) error
|
||||
// This plugin is only meant to customize other actions (extension points) of the evictor,
|
||||
// like filtering, sorting, and other ones that might be relevant in the future
|
||||
type DefaultEvictor struct {
|
||||
logger klog.Logger
|
||||
args *DefaultEvictorArgs
|
||||
constraints []constraint
|
||||
handle frameworktypes.Handle
|
||||
@@ -66,150 +68,326 @@ func HaveEvictAnnotation(pod *v1.Pod) bool {
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
// nolint: gocyclo
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
defaultEvictorArgs, ok := args.(*DefaultEvictorArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
ev := &DefaultEvictor{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: defaultEvictorArgs,
|
||||
}
|
||||
// add constraints
|
||||
err := ev.addAllConstraints(logger, handle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.EvictFailedBarePods {
|
||||
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
func (d *DefaultEvictor) addAllConstraints(logger klog.Logger, handle frameworktypes.Handle) error {
|
||||
args := d.args
|
||||
// Determine effective protected policies based on the provided arguments.
|
||||
effectivePodProtections := getEffectivePodProtections(args)
|
||||
|
||||
if err := applyEffectivePodProtections(d, effectivePodProtections, handle); err != nil {
|
||||
return fmt.Errorf("failed to apply effective protected policies: %w", err)
|
||||
}
|
||||
if constraints, err := evictionConstraintsForLabelSelector(logger, args.LabelSelector); err != nil {
|
||||
return err
|
||||
} else {
|
||||
d.constraints = append(d.constraints, constraints...)
|
||||
}
|
||||
if constraints, err := evictionConstraintsForMinReplicas(logger, args.MinReplicas, handle); err != nil {
|
||||
return err
|
||||
} else {
|
||||
d.constraints = append(d.constraints, constraints...)
|
||||
}
|
||||
d.constraints = append(d.constraints, evictionConstraintsForMinPodAge(args.MinPodAge)...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyEffectivePodProtections configures the evictor with specified Pod protection.
|
||||
func applyEffectivePodProtections(d *DefaultEvictor, podProtections []PodProtection, handle frameworktypes.Handle) error {
|
||||
protectionMap := make(map[PodProtection]bool, len(podProtections))
|
||||
for _, protection := range podProtections {
|
||||
protectionMap[protection] = true
|
||||
}
|
||||
|
||||
// Apply protections
|
||||
if err := applySystemCriticalPodsProtection(d, protectionMap, handle); err != nil {
|
||||
return err
|
||||
}
|
||||
applyFailedBarePodsProtection(d, protectionMap)
|
||||
applyLocalStoragePodsProtection(d, protectionMap)
|
||||
applyDaemonSetPodsProtection(d, protectionMap)
|
||||
applyPVCPodsProtection(d, protectionMap)
|
||||
applyPodsWithoutPDBProtection(d, protectionMap, handle)
|
||||
applyPodsWithResourceClaimsProtection(d, protectionMap)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// protectedPVCStorageClasses returns the list of storage classes that should
|
||||
// be protected from eviction. If the list is empty or nil then all storage
|
||||
// classes are protected (assuming PodsWithPVC protection is enabled).
|
||||
func protectedPVCStorageClasses(d *DefaultEvictor) []ProtectedStorageClass {
|
||||
protcfg := d.args.PodProtections.Config
|
||||
if protcfg == nil {
|
||||
return nil
|
||||
}
|
||||
scconfig := protcfg.PodsWithPVC
|
||||
if scconfig == nil {
|
||||
return nil
|
||||
}
|
||||
return scconfig.ProtectedStorageClasses
|
||||
}
|
||||
|
||||
// podStorageClasses returns a list of storage classes referred by a pod. We
|
||||
// need this when assessing if a pod should be protected because it refers to a
|
||||
// protected storage class.
|
||||
func podStorageClasses(inf informers.SharedInformerFactory, pod *v1.Pod) ([]string, error) {
|
||||
lister := inf.Core().V1().PersistentVolumeClaims().Lister().PersistentVolumeClaims(
|
||||
pod.Namespace,
|
||||
)
|
||||
|
||||
referred := map[string]bool{}
|
||||
for _, vol := range pod.Spec.Volumes {
|
||||
if vol.PersistentVolumeClaim == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
claim, err := lister.Get(vol.PersistentVolumeClaim.ClaimName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to get persistent volume claim %q/%q: %w",
|
||||
pod.Namespace, vol.PersistentVolumeClaim.ClaimName, err,
|
||||
)
|
||||
}
|
||||
|
||||
// this should never happen as once a pvc is created with a nil
|
||||
// storageClass it is automatically picked up by the default
|
||||
// storage class. By returning an error here we make the pod
|
||||
// protected from eviction.
|
||||
if claim.Spec.StorageClassName == nil || *claim.Spec.StorageClassName == "" {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to resolve storage class for pod %q/%q",
|
||||
pod.Namespace, claim.Name,
|
||||
)
|
||||
}
|
||||
|
||||
referred[*claim.Spec.StorageClassName] = true
|
||||
}
|
||||
|
||||
return slices.Collect(maps.Keys(referred)), nil
|
||||
}
|
||||
|
||||
func applyFailedBarePodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[FailedBarePods]
|
||||
if !isProtectionEnabled {
|
||||
d.logger.V(1).Info("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
// Enable evictFailedBarePods to evict bare pods in failed phase
|
||||
if len(ownerRefList) == 0 && pod.Status.Phase != v1.PodFailed {
|
||||
return fmt.Errorf("pod does not have any ownerRefs and is not in failed phase")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
} else {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
if len(ownerRefList) == 0 {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if len(podutil.OwnerRef(pod)) == 0 {
|
||||
return fmt.Errorf("pod does not have any ownerRefs")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if !defaultEvictorArgs.EvictSystemCriticalPods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsCriticalPriorityPod(pod) {
|
||||
return fmt.Errorf("pod has system critical priority")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.PriorityThreshold != nil && (defaultEvictorArgs.PriorityThreshold.Value != nil || len(defaultEvictorArgs.PriorityThreshold.Name) > 0) {
|
||||
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), handle.ClientSet(), defaultEvictorArgs.PriorityThreshold)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get priority threshold: %v", err)
|
||||
}
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if IsPodEvictableBasedOnPriority(pod, thresholdPriority) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("pod has higher priority than specified priority class threshold")
|
||||
})
|
||||
}
|
||||
} else {
|
||||
klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
|
||||
func applySystemCriticalPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool, handle frameworktypes.Handle) error {
|
||||
isProtectionEnabled := protectionMap[SystemCriticalPods]
|
||||
if !isProtectionEnabled {
|
||||
d.logger.V(1).Info("Warning: System critical pod protection is disabled. This could cause eviction of Kubernetes system pods.")
|
||||
return nil
|
||||
}
|
||||
if !defaultEvictorArgs.EvictLocalStoragePods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithLocalStorage(pod) {
|
||||
return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
|
||||
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsCriticalPriorityPod(pod) {
|
||||
return fmt.Errorf("pod has system critical priority and is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
priorityThreshold := d.args.PriorityThreshold
|
||||
if priorityThreshold != nil && (priorityThreshold.Value != nil || len(priorityThreshold.Name) > 0) {
|
||||
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), handle.ClientSet(), priorityThreshold)
|
||||
if err != nil {
|
||||
d.logger.Error(err, "failed to get priority threshold")
|
||||
return err
|
||||
}
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if !IsPodEvictableBasedOnPriority(pod, thresholdPriority) {
|
||||
return fmt.Errorf("pod has higher priority than specified priority class threshold")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if !defaultEvictorArgs.EvictDaemonSetPods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func applyLocalStoragePodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[PodsWithLocalStorage]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithLocalStorage(pod) {
|
||||
return fmt.Errorf("pod has local storage and is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func applyDaemonSetPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[DaemonSetPods]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
if utils.IsDaemonsetPod(ownerRefList) {
|
||||
return fmt.Errorf("pod is related to daemonset and descheduler is not configured with evictDaemonSetPods")
|
||||
return fmt.Errorf("daemonset pods are protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if defaultEvictorArgs.IgnorePvcPods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithPVC(pod) {
|
||||
return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
selector, err := metav1.LabelSelectorAsSelector(defaultEvictorArgs.LabelSelector)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get selector from label selector")
|
||||
}
|
||||
if defaultEvictorArgs.LabelSelector != nil && !selector.Empty() {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if !selector.Matches(labels.Set(pod.Labels)) {
|
||||
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// applyPVCPodsProtection protects pods that refer to a PVC from eviction. If
|
||||
// the user has specified a list of storage classes to protect, then only pods
// referring to PVCs of those storage classes are protected.
|
||||
func applyPVCPodsProtection(d *DefaultEvictor, enabledProtections map[PodProtection]bool) {
|
||||
if !enabledProtections[PodsWithPVC] {
|
||||
return
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.MinReplicas > 1 {
|
||||
indexName := "metadata.ownerReferences"
|
||||
indexer, err := getPodIndexerByOwnerRefs(indexName, handle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if len(pod.OwnerReferences) == 0 {
|
||||
// if the user isn't filtering by storage classes we protect all pods
|
||||
// referring to a PVC.
|
||||
protected := protectedPVCStorageClasses(d)
|
||||
if len(protected) == 0 {
|
||||
d.constraints = append(
|
||||
d.constraints,
|
||||
func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithPVC(pod) {
|
||||
return fmt.Errorf("pod with PVC is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
},
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
if len(pod.OwnerReferences) > 1 {
|
||||
klog.V(5).InfoS("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
|
||||
return nil
|
||||
}
|
||||
protectedsc := map[string]bool{}
|
||||
for _, class := range protected {
|
||||
protectedsc[class.Name] = true
|
||||
}
|
||||
|
||||
ownerRef := pod.OwnerReferences[0]
|
||||
objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
|
||||
d.constraints = append(
|
||||
d.constraints, func(pod *v1.Pod) error {
|
||||
classes, err := podStorageClasses(d.handle.SharedInformerFactory(), pod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods for minReplicas filter in the policy parameter")
|
||||
return err
|
||||
}
|
||||
|
||||
if uint(len(objs)) < defaultEvictorArgs.MinReplicas {
|
||||
return fmt.Errorf("owner has %d replicas which is less than minReplicas of %d", len(objs), defaultEvictorArgs.MinReplicas)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.MinPodAge != nil {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if pod.Status.StartTime == nil || time.Since(pod.Status.StartTime.Time) < defaultEvictorArgs.MinPodAge.Duration {
|
||||
return fmt.Errorf("pod age is not older than MinPodAge: %s seconds", defaultEvictorArgs.MinPodAge.String())
|
||||
for _, class := range classes {
|
||||
if !protectedsc[class] {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("pod using protected storage class %q", class)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.IgnorePodsWithoutPDB {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
func applyPodsWithoutPDBProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool, handle frameworktypes.Handle) {
|
||||
isProtectionEnabled := protectionMap[PodsWithoutPDB]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
hasPdb, err := utils.IsPodCoveredByPDB(pod, handle.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to check if pod is covered by PodDisruptionBudget: %w", err)
|
||||
}
|
||||
if !hasPdb {
|
||||
return fmt.Errorf("no PodDisruptionBudget found for pod")
|
||||
return fmt.Errorf("pod does not have a PodDisruptionBudget and is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return ev, nil
|
||||
func applyPodsWithResourceClaimsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[PodsWithResourceClaims]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithResourceClaims(pod) {
|
||||
return fmt.Errorf("pod has ResourceClaims and descheduler is configured to protect ResourceClaims pods")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// getEffectivePodProtections determines which policies are currently active.
|
||||
// It supports both new-style (PodProtections) and legacy-style flags.
|
||||
func getEffectivePodProtections(args *DefaultEvictorArgs) []PodProtection {
|
||||
// determine whether to use PodProtections config
|
||||
useNewConfig := len(args.PodProtections.DefaultDisabled) > 0 || len(args.PodProtections.ExtraEnabled) > 0
|
||||
|
||||
if !useNewConfig {
|
||||
// fall back to the Deprecated config
|
||||
return legacyGetPodProtections(args)
|
||||
}
|
||||
|
||||
// effective is the final list of active protections.
|
||||
effective := make([]PodProtection, 0)
|
||||
effective = append(effective, defaultPodProtections...)
|
||||
|
||||
// Remove PodProtections that are in the DefaultDisabled list.
|
||||
effective = slices.DeleteFunc(effective, func(protection PodProtection) bool {
|
||||
return slices.Contains(args.PodProtections.DefaultDisabled, protection)
|
||||
})
|
||||
|
||||
// Add extra enabled in PodProtections
|
||||
effective = append(effective, args.PodProtections.ExtraEnabled...)
|
||||
|
||||
return effective
|
||||
}
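// Illustrative sketch (derived only from the declarations above, not a new API):
// for a configuration that disables the DaemonSetPods default and enables the
// PodsWithPVC extra, the effective list resolves as follows.
//
//	args := &DefaultEvictorArgs{
//		PodProtections: PodProtections{
//			DefaultDisabled: []PodProtection{DaemonSetPods},
//			ExtraEnabled:    []PodProtection{PodsWithPVC},
//		},
//	}
//	// getEffectivePodProtections(args) returns
//	// [PodsWithLocalStorage SystemCriticalPods FailedBarePods PodsWithPVC]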
|
||||
|
||||
// legacyGetPodProtections returns protections using deprecated boolean flags.
|
||||
func legacyGetPodProtections(args *DefaultEvictorArgs) []PodProtection {
|
||||
var protections []PodProtection
|
||||
|
||||
// defaultDisabled
|
||||
if !args.EvictLocalStoragePods {
|
||||
protections = append(protections, PodsWithLocalStorage)
|
||||
}
|
||||
if !args.EvictDaemonSetPods {
|
||||
protections = append(protections, DaemonSetPods)
|
||||
}
|
||||
if !args.EvictSystemCriticalPods {
|
||||
protections = append(protections, SystemCriticalPods)
|
||||
}
|
||||
if !args.EvictFailedBarePods {
|
||||
protections = append(protections, FailedBarePods)
|
||||
}
|
||||
|
||||
// extraEnabled
|
||||
if args.IgnorePvcPods {
|
||||
protections = append(protections, PodsWithPVC)
|
||||
}
|
||||
if args.IgnorePodsWithoutPDB {
|
||||
protections = append(protections, PodsWithoutPDB)
|
||||
}
|
||||
return protections
|
||||
}
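// Illustrative sketch of the legacy mapping (the values follow directly from
// the branches above): with only the deprecated flags set,
//
//	args := &DefaultEvictorArgs{EvictLocalStoragePods: true, IgnorePvcPods: true}
//
// legacyGetPodProtections(args) returns
// [DaemonSetPods SystemCriticalPods FailedBarePods PodsWithPVC]: local-storage
// protection is dropped because eviction of such pods was opted into, and PVC
// protection is added because ignorePvcPods was set.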
|
||||
|
||||
// Name retrieves the plugin name
|
||||
@@ -218,14 +396,15 @@ func (d *DefaultEvictor) Name() string {
|
||||
}
|
||||
|
||||
func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
logger := d.logger.WithValues("ExtensionPoint", frameworktypes.PreEvictionFilterExtensionPoint)
|
||||
if d.args.NodeFit {
|
||||
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), d.args.NodeSelector)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "unable to list ready nodes", "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
|
||||
klog.InfoS("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
|
||||
logger.Info("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
return true
|
||||
@@ -234,12 +413,17 @@ func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
}
|
||||
|
||||
func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
|
||||
logger := d.logger.WithValues("ExtensionPoint", frameworktypes.FilterExtensionPoint)
|
||||
checkErrs := []error{}
|
||||
|
||||
if HaveEvictAnnotation(pod) {
|
||||
return true
|
||||
}
|
||||
|
||||
if d.args.NoEvictionPolicy == MandatoryNoEvictionPolicy && evictionutils.HaveNoEvictionAnnotation(pod) {
|
||||
return false
|
||||
}
|
||||
|
||||
if utils.IsMirrorPod(pod) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
|
||||
}
|
||||
@@ -259,7 +443,7 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
|
||||
}
|
||||
|
||||
if len(checkErrs) > 0 {
|
||||
klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
|
||||
logger.V(4).Info("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@@ -21,35 +21,7 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||
return RegisterDefaults(scheme)
|
||||
}
|
||||
|
||||
// SetDefaults_DefaultEvictorArgs sets the default values for the
// DefaultEvictorArgs configuration.
// TODO: the final default values are still to be discussed in the community.
|
||||
func SetDefaults_DefaultEvictorArgs(obj runtime.Object) {
|
||||
args := obj.(*DefaultEvictorArgs)
|
||||
if args.NodeSelector == "" {
|
||||
args.NodeSelector = ""
|
||||
}
|
||||
if !args.EvictLocalStoragePods {
|
||||
args.EvictLocalStoragePods = false
|
||||
}
|
||||
if !args.EvictDaemonSetPods {
|
||||
args.EvictDaemonSetPods = false
|
||||
}
|
||||
if !args.EvictSystemCriticalPods {
|
||||
args.EvictSystemCriticalPods = false
|
||||
}
|
||||
if !args.IgnorePvcPods {
|
||||
args.IgnorePvcPods = false
|
||||
}
|
||||
if !args.EvictFailedBarePods {
|
||||
args.EvictFailedBarePods = false
|
||||
}
|
||||
if args.LabelSelector == nil {
|
||||
args.LabelSelector = nil
|
||||
}
|
||||
if args.PriorityThreshold == nil {
|
||||
args.PriorityThreshold = nil
|
||||
}
|
||||
if !args.NodeFit {
|
||||
args.NodeFit = false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
@@ -25,16 +25,132 @@ import (
|
||||
type DefaultEvictorArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
NodeSelector string `json:"nodeSelector,omitempty"`
|
||||
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
|
||||
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
|
||||
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
|
||||
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
|
||||
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
|
||||
NodeFit bool `json:"nodeFit,omitempty"`
|
||||
MinReplicas uint `json:"minReplicas,omitempty"`
|
||||
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
|
||||
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
|
||||
NodeSelector string `json:"nodeSelector,omitempty"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
|
||||
NodeFit bool `json:"nodeFit,omitempty"`
|
||||
MinReplicas uint `json:"minReplicas,omitempty"`
|
||||
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
|
||||
NoEvictionPolicy NoEvictionPolicy `json:"noEvictionPolicy,omitempty"`
|
||||
|
||||
// PodProtections holds the list of enabled and disabled protection policies.
|
||||
// Users can selectively disable certain default protection rules or enable extra ones.
|
||||
PodProtections PodProtections `json:"podProtections,omitempty"`
|
||||
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "PodsWithLocalStorage" instead.
|
||||
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "DaemonSetPods" instead.
|
||||
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "SystemCriticalPods" instead.
|
||||
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
|
||||
// Deprecated: Use ExtraPodProtection with "PodsWithPVC" instead.
|
||||
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
|
||||
// Deprecated: Use ExtraPodProtection with "PodsWithoutPDB" instead.
|
||||
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "FailedBarePods" instead.
|
||||
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
|
||||
}
|
||||
|
||||
// PodProtection defines the protection policy for a pod.
|
||||
type PodProtection string
|
||||
|
||||
const (
|
||||
PodsWithLocalStorage PodProtection = "PodsWithLocalStorage"
|
||||
DaemonSetPods PodProtection = "DaemonSetPods"
|
||||
SystemCriticalPods PodProtection = "SystemCriticalPods"
|
||||
FailedBarePods PodProtection = "FailedBarePods"
|
||||
PodsWithPVC PodProtection = "PodsWithPVC"
|
||||
PodsWithoutPDB PodProtection = "PodsWithoutPDB"
|
||||
PodsWithResourceClaims PodProtection = "PodsWithResourceClaims"
|
||||
)
|
||||
|
||||
// PodProtections holds the list of enabled and disabled protection policies.
|
||||
// NOTE: The list of default enabled pod protection policies is subject to change in future versions.
|
||||
// +k8s:deepcopy-gen=true
|
||||
type PodProtections struct {
|
||||
// ExtraEnabled specifies additional protection policies that should be enabled.
|
||||
// Supports: PodsWithPVC, PodsWithoutPDB
|
||||
ExtraEnabled []PodProtection `json:"extraEnabled,omitempty"`
|
||||
|
||||
// DefaultDisabled specifies which default protection policies should be disabled.
|
||||
// Supports: PodsWithLocalStorage, DaemonSetPods, SystemCriticalPods, FailedBarePods
|
||||
DefaultDisabled []PodProtection `json:"defaultDisabled,omitempty"`
|
||||
|
||||
// Config holds configuration for pod protection policies. Depending on
|
||||
// the enabled policies this may be required. For instance, when
|
||||
// enabling the PodsWithPVC policy the user may specify which storage
|
||||
// classes should be protected.
|
||||
Config *PodProtectionsConfig `json:"config,omitempty"`
|
||||
}
|
||||
|
||||
// PodProtectionsConfig holds configuration for pod protection policies. The
|
||||
// name of the fields here must be equal to a protection name. This struct is
|
||||
// meant to be extended as more protection policies are added.
|
||||
// +k8s:deepcopy-gen=true
|
||||
type PodProtectionsConfig struct {
|
||||
PodsWithPVC *PodsWithPVCConfig `json:"PodsWithPVC,omitempty"`
|
||||
}
|
||||
|
||||
// PodsWithPVCConfig holds configuration for the PodsWithPVC protection.
|
||||
// +k8s:deepcopy-gen=true
|
||||
type PodsWithPVCConfig struct {
|
||||
// ProtectedStorageClasses is a list of storage classes that we want to
|
||||
// protect. i.e. if a pod refers to one of these storage classes it is
|
||||
// protected from being evicted. If none is provided then all pods with
|
||||
// PVCs are protected from eviction.
|
||||
ProtectedStorageClasses []ProtectedStorageClass `json:"protectedStorageClasses,omitempty"`
|
||||
}
|
||||
|
||||
// ProtectedStorageClass is used to determine what storage classes are
|
||||
// protected when the PodsWithPVC protection is enabled. This object exists
|
||||
// so we can later on extend it with more configuration if needed.
|
||||
type ProtectedStorageClass struct {
|
||||
Name string `json:"name"`
|
||||
}
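// Illustrative sketch (the "fast-ssd" class name is a made-up example):
// protect only pods whose PVCs use a specific storage class, leaving other
// PVC-backed pods evictable.
//
//	protections := PodProtections{
//		ExtraEnabled: []PodProtection{PodsWithPVC},
//		Config: &PodProtectionsConfig{
//			PodsWithPVC: &PodsWithPVCConfig{
//				ProtectedStorageClasses: []ProtectedStorageClass{{Name: "fast-ssd"}},
//			},
//		},
//	}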
|
||||
|
||||
// defaultPodProtections holds the list of protection policies that are enabled by default.
|
||||
// Users can use the 'disabledDefaultPodProtections' evictor argument (via PodProtections.DefaultDisabled)
// to disable any of these default protections.
|
||||
//
|
||||
// The following four policies are included by default:
|
||||
// - PodsWithLocalStorage: Protects pods with local storage.
|
||||
// - DaemonSetPods: Protects DaemonSet managed pods.
|
||||
// - SystemCriticalPods: Protects system-critical pods.
|
||||
// - FailedBarePods: Protects failed bare pods (not part of any controller).
|
||||
var defaultPodProtections = []PodProtection{
|
||||
PodsWithLocalStorage,
|
||||
SystemCriticalPods,
|
||||
FailedBarePods,
|
||||
DaemonSetPods,
|
||||
}
|
||||
|
||||
// extraPodProtections holds a list of protection policies that the user can optionally enable
|
||||
// through the configuration (via PodProtections.ExtraEnabled). These policies are not enabled by default.
|
||||
//
|
||||
// Currently supported extra policies:
|
||||
// - PodsWithPVC: Protects pods using PersistentVolumeClaims.
|
||||
// - PodsWithoutPDB: Protects pods lacking a PodDisruptionBudget.
|
||||
// - PodsWithResourceClaims: Protects pods using ResourceClaims.
|
||||
var extraPodProtections = []PodProtection{
|
||||
PodsWithPVC,
|
||||
PodsWithoutPDB,
|
||||
PodsWithResourceClaims,
|
||||
}
|
||||
|
||||
// NoEvictionPolicy dictates whether a no-eviction policy is preferred or mandatory.
|
||||
// Needs to be used with caution as this gives users the ability to protect their pods
// from eviction, which might work against enforced policies, e.g. plugins evicting pods
// that violate security policies.
|
||||
type NoEvictionPolicy string
|
||||
|
||||
const (
|
||||
// PreferredNoEvictionPolicy interprets the no-eviction policy as a preference.
|
||||
// Meaning the annotation will get ignored by the DefaultEvictor plugin.
|
||||
// Yet, plugins may optionally sort their pods based on the annotation
|
||||
// and focus on evicting pods that do not set the annotation.
|
||||
PreferredNoEvictionPolicy NoEvictionPolicy = "Preferred"
|
||||
|
||||
// MandatoryNoEvictionPolicy interprets the no-eviction policy as mandatory.
|
||||
// Every pod carrying the annotation will get excluded from eviction.
|
||||
MandatoryNoEvictionPolicy NoEvictionPolicy = "Mandatory"
|
||||
)
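// Sketch of how the policy is consumed by the Filter extension point
// (this mirrors the check shown earlier in this change; evictionutils is the
// existing annotation helper package):
//
//	if d.args.NoEvictionPolicy == MandatoryNoEvictionPolicy && evictionutils.HaveNoEvictionAnnotation(pod) {
//		return false // never evicted by this plugin
//	}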
|
||||
|
||||
@@ -15,22 +15,86 @@ package defaultevictor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
"slices"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func ValidateDefaultEvictorArgs(obj runtime.Object) error {
|
||||
args := obj.(*DefaultEvictorArgs)
|
||||
|
||||
var allErrs []error
|
||||
if args.PriorityThreshold != nil && args.PriorityThreshold.Value != nil && len(args.PriorityThreshold.Name) > 0 {
|
||||
return fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set, got %v", args)
|
||||
allErrs = append(allErrs, fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set"))
|
||||
}
|
||||
|
||||
if args.MinReplicas == 1 {
|
||||
klog.V(4).Info("DefaultEvictor minReplicas must be greater than 1 to check for min pods during eviction. This check will be ignored during eviction.")
|
||||
}
|
||||
|
||||
if args.NoEvictionPolicy != "" {
|
||||
if args.NoEvictionPolicy != PreferredNoEvictionPolicy && args.NoEvictionPolicy != MandatoryNoEvictionPolicy {
|
||||
allErrs = append(allErrs, fmt.Errorf("noEvictionPolicy accepts only %q values", []NoEvictionPolicy{PreferredNoEvictionPolicy, MandatoryNoEvictionPolicy}))
|
||||
}
|
||||
}
|
||||
|
||||
// check if any deprecated fields are set to true
|
||||
hasDeprecatedFields := args.EvictLocalStoragePods || args.EvictDaemonSetPods ||
|
||||
args.EvictSystemCriticalPods || args.IgnorePvcPods ||
|
||||
args.EvictFailedBarePods || args.IgnorePodsWithoutPDB
|
||||
|
||||
// disallow mixing deprecated fields with PodProtections.ExtraEnabled and PodProtections.DefaultDisabled
|
||||
if hasDeprecatedFields && (len(args.PodProtections.ExtraEnabled) > 0 || len(args.PodProtections.DefaultDisabled) > 0) {
|
||||
allErrs = append(allErrs, fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"))
|
||||
}
|
||||
|
||||
if len(args.PodProtections.ExtraEnabled) > 0 || len(args.PodProtections.DefaultDisabled) > 0 {
|
||||
|
||||
for _, policy := range args.PodProtections.ExtraEnabled {
|
||||
if !slices.Contains(extraPodProtections, policy) {
|
||||
allErrs = append(allErrs, fmt.Errorf("invalid pod protection policy in ExtraEnabled: %q. Valid options are: %v",
|
||||
string(policy), extraPodProtections))
|
||||
}
|
||||
}
|
||||
|
||||
for _, policy := range args.PodProtections.DefaultDisabled {
|
||||
if !slices.Contains(defaultPodProtections, policy) {
|
||||
allErrs = append(allErrs, fmt.Errorf("invalid pod protection policy in DefaultDisabled: %q. Valid options are: %v",
|
||||
string(policy), defaultPodProtections))
|
||||
}
|
||||
}
|
||||
|
||||
if hasDuplicates(args.PodProtections.DefaultDisabled) {
|
||||
allErrs = append(allErrs, fmt.Errorf("PodProtections.DefaultDisabled contains duplicate entries"))
|
||||
}
|
||||
|
||||
if hasDuplicates(args.PodProtections.ExtraEnabled) {
|
||||
allErrs = append(allErrs, fmt.Errorf("PodProtections.ExtraEnabled contains duplicate entries"))
|
||||
}
|
||||
|
||||
if slices.Contains(args.PodProtections.ExtraEnabled, PodsWithPVC) {
|
||||
if args.PodProtections.Config != nil && args.PodProtections.Config.PodsWithPVC != nil {
|
||||
protectedsc := args.PodProtections.Config.PodsWithPVC.ProtectedStorageClasses
|
||||
for i, sc := range protectedsc {
|
||||
if sc.Name == "" {
|
||||
allErrs = append(allErrs, fmt.Errorf("PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[%d] name cannot be empty", i))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return utilerrors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
func hasDuplicates(slice []PodProtection) bool {
|
||||
seen := make(map[PodProtection]struct{}, len(slice))
|
||||
for _, item := range slice {
|
||||
if _, exists := seen[item]; exists {
|
||||
return true
|
||||
}
|
||||
seen[item] = struct{}{}
|
||||
}
|
||||
return false
|
||||
}
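// Quick illustration of hasDuplicates, following the implementation above:
//
//	hasDuplicates([]PodProtection{PodsWithPVC, PodsWithoutPDB}) // false
//	hasDuplicates([]PodProtection{PodsWithPVC, PodsWithPVC})    // true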
|
||||
|
||||
pkg/framework/plugins/defaultevictor/validation_test.go (new file, 242 lines)
@@ -0,0 +1,242 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaultevictor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func TestValidateDefaultEvictorArgs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args *DefaultEvictorArgs
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing invalid priority",
|
||||
args: &DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: utilptr.To[int32](1),
|
||||
Name: "priority-name",
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set"),
|
||||
},
|
||||
{
|
||||
name: "passing invalid no eviction policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
NoEvictionPolicy: "invalid-no-eviction-policy",
|
||||
},
|
||||
errInfo: fmt.Errorf("noEvictionPolicy accepts only %q values", []NoEvictionPolicy{PreferredNoEvictionPolicy, MandatoryNoEvictionPolicy}),
|
||||
},
|
||||
{
|
||||
name: "Valid configuration with no deprecated fields",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{},
|
||||
ExtraEnabled: []PodProtection{},
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Valid configuration: both Disabled and ExtraEnabled",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{
|
||||
DaemonSetPods,
|
||||
PodsWithLocalStorage,
|
||||
},
|
||||
ExtraEnabled: []PodProtection{
|
||||
PodsWithPVC,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Valid configuration with ExtraEnabled",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{
|
||||
PodsWithPVC,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Invalid configuration: Deprecated field used with Disabled",
|
||||
args: &DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: true,
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{
|
||||
DaemonSetPods,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
|
||||
},
|
||||
{
|
||||
name: "Invalid configuration: Deprecated field used with ExtraPodProtections",
|
||||
args: &DefaultEvictorArgs{
|
||||
EvictDaemonSetPods: true,
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{
|
||||
PodsWithPVC,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
|
||||
},
|
||||
{
|
||||
name: "MinReplicas warning logged but no error",
|
||||
args: &DefaultEvictorArgs{
|
||||
MinReplicas: 1,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled: Unknown policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{"InvalidPolicy"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "InvalidPolicy". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled: Misspelled policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{"PodsWithPVCC"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "PodsWithPVCC". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled: Policy from DefaultDisabled list",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{DaemonSetPods},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "DaemonSetPods". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled: Unknown policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{"InvalidPolicy"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "InvalidPolicy". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled: Misspelled policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{"PodsWithLocalStorag"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "PodsWithLocalStorag". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled: Policy from ExtraEnabled list",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{PodsWithPVC},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "PodsWithPVC". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled duplicate",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithPVC},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`PodProtections.ExtraEnabled contains duplicate entries`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled duplicate",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{PodsWithLocalStorage, PodsWithLocalStorage},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`PodProtections.DefaultDisabled contains duplicate entries`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled duplicate and Invalid ExtraEnabled duplicate and passing invalid no eviction policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
NoEvictionPolicy: "invalid-no-eviction-policy",
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithPVC},
|
||||
DefaultDisabled: []PodProtection{PodsWithLocalStorage, PodsWithLocalStorage, PodsWithoutPDB},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`[noEvictionPolicy accepts only ["Preferred" "Mandatory"] values, invalid pod protection policy in DefaultDisabled: "PodsWithoutPDB". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods], PodProtections.DefaultDisabled contains duplicate entries, PodProtections.ExtraEnabled contains duplicate entries]`),
|
||||
},
|
||||
{
|
||||
name: "Protected storage classes without storage class name",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC},
|
||||
Config: &PodProtectionsConfig{
|
||||
PodsWithPVC: &PodsWithPVCConfig{
|
||||
ProtectedStorageClasses: []ProtectedStorageClass{
|
||||
{
|
||||
Name: "",
|
||||
},
|
||||
{
|
||||
Name: "protected-storage-class-0",
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
},
|
||||
{
|
||||
Name: "protected-storage-class-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`[PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[0] name cannot be empty, PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[2] name cannot be empty]`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
validateErr := ValidateDefaultEvictorArgs(runtime.Object(testCase.args))
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
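// To run just these validation cases locally (assuming the standard module
// layout of this repository):
//
//	go test ./pkg/framework/plugins/defaultevictor/ -run TestValidateDefaultEvictorArgs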
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -46,6 +46,7 @@ func (in *DefaultEvictorArgs) DeepCopyInto(out *DefaultEvictorArgs) {
|
||||
*out = new(v1.Duration)
|
||||
**out = **in
|
||||
}
|
||||
in.PodProtections.DeepCopyInto(&out.PodProtections)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -66,3 +67,76 @@ func (in *DefaultEvictorArgs) DeepCopyObject() runtime.Object {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodProtections) DeepCopyInto(out *PodProtections) {
|
||||
*out = *in
|
||||
if in.ExtraEnabled != nil {
|
||||
in, out := &in.ExtraEnabled, &out.ExtraEnabled
|
||||
*out = make([]PodProtection, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.DefaultDisabled != nil {
|
||||
in, out := &in.DefaultDisabled, &out.DefaultDisabled
|
||||
*out = make([]PodProtection, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Config != nil {
|
||||
in, out := &in.Config, &out.Config
|
||||
*out = new(PodProtectionsConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProtections.
|
||||
func (in *PodProtections) DeepCopy() *PodProtections {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodProtections)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodProtectionsConfig) DeepCopyInto(out *PodProtectionsConfig) {
|
||||
*out = *in
|
||||
if in.PodsWithPVC != nil {
|
||||
in, out := &in.PodsWithPVC, &out.PodsWithPVC
|
||||
*out = new(PodsWithPVCConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProtectionsConfig.
|
||||
func (in *PodProtectionsConfig) DeepCopy() *PodProtectionsConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodProtectionsConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodsWithPVCConfig) DeepCopyInto(out *PodsWithPVCConfig) {
|
||||
*out = *in
|
||||
if in.ProtectedStorageClasses != nil {
|
||||
in, out := &in.ProtectedStorageClasses, &out.ProtectedStorageClasses
|
||||
*out = make([]ProtectedStorageClass, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsWithPVCConfig.
|
||||
func (in *PodsWithPVCConfig) DeepCopy() *PodsWithPVCConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodsWithPVCConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -46,6 +46,7 @@ var _ fwtypes.DeschedulePlugin = &Example{}
|
||||
// Example is our plugin (implementing the DeschedulePlugin interface). This
|
||||
// plugin will evict pods that match a regex and are older than a certain age.
|
||||
type Example struct {
|
||||
logger klog.Logger
|
||||
handle fwtypes.Handle
|
||||
args *ExampleArgs
|
||||
podFilter podutil.FilterFunc
|
||||
@@ -55,12 +56,13 @@ type Example struct {
|
||||
// a runtime.Object. Handle is used by plugins to retrieve a kubernetes client
|
||||
// set, evictor interface, shared informer factory and other instruments shared
|
||||
// across different plugins.
|
||||
func New(args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
|
||||
// make sure we are receiving the right argument type.
|
||||
exampleArgs, ok := args.(*ExampleArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("args must be of type ExampleArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
// we can use the included and excluded namespaces to filter the pods we want
|
||||
// to evict.
|
||||
@@ -90,6 +92,7 @@ func New(args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
|
||||
}
|
||||
|
||||
return &Example{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
podFilter: podFilter,
|
||||
args: exampleArgs,
|
||||
@@ -107,7 +110,7 @@ func (d *Example) Name() string {
|
||||
// of nodes we need to process.
|
||||
func (d *Example) Deschedule(ctx context.Context, nodes []*v1.Node) *fwtypes.Status {
|
||||
var podsToEvict []*v1.Pod
|
||||
logger := klog.FromContext(ctx)
|
||||
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", fwtypes.DescheduleExtensionPoint)
|
||||
logger.Info("Example plugin starting descheduling")
|
||||
|
||||
re, err := regexp.Compile(d.args.Regex)
|
||||
@@ -137,10 +140,23 @@ func (d *Example) Deschedule(ctx context.Context, nodes []*v1.Node) *fwtypes.Sta
|
||||
|
||||
// go node by node getting all pods that we can evict.
|
||||
for _, node := range nodes {
|
||||
// ListAllPodsOnANode is a helper function that retrieves all
|
||||
// pods filtering out the ones we can't evict. We merge the
|
||||
// default filters with the one we created above.
|
||||
pods, err := podutil.ListAllPodsOnANode(
|
||||
// ListAllPodsOnANode is a helper function that retrieves all pods filtering out the ones we can't evict.
|
||||
// ListPodsOnANode is a helper function that retrieves all pods (excluding those in the Succeeded or Failed phase), filtering out the ones we can't evict.
|
||||
// We merge the default filters with the one we created above.
|
||||
//
|
||||
// The difference between ListPodsOnANode and ListAllPodsOnANode lies in their handling of Pods based on their phase:
|
||||
// - ListPodsOnANode excludes Pods that are in Succeeded or Failed phases because they do not occupy any resources.
|
||||
// - ListAllPodsOnANode does not exclude Pods based on their phase, listing all Pods regardless of their state.
|
||||
//
|
||||
// In this context, we prefer using ListPodsOnANode because:
|
||||
// 1. It ensures that only active Pods (not in Succeeded or Failed states) are considered for eviction.
|
||||
// 2. This helps avoid unnecessary processing of Pods that no longer consume resources.
|
||||
// 3. By applying an additional filter (d.podFilter and filter), we can further refine which Pods are eligible for eviction,
|
||||
// ensuring that only Pods meeting specific criteria are selected.
|
||||
//
|
||||
// However, if you need to consider all Pods including those in Succeeded or Failed states for other purposes,
|
||||
// you should use ListAllPodsOnANode instead.
|
||||
pods, err := podutil.ListPodsOnANode(
|
||||
node.Name,
|
||||
d.handle.GetPodsAssignedToNodeFunc(),
|
||||
podutil.WrapFilterFuncs(d.podFilter, filter),
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -44,6 +44,7 @@ var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
|
||||
// can schedule according to its plugin. Note that CPU/Memory requests are used
|
||||
// to calculate nodes' utilization and not the actual resource usage.
|
||||
type HighNodeUtilization struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *HighNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
@@ -55,7 +56,7 @@ type HighNodeUtilization struct {
|
||||
|
||||
// NewHighNodeUtilization builds plugin from its arguments while passing a handle.
|
||||
func NewHighNodeUtilization(
|
||||
genericArgs runtime.Object, handle frameworktypes.Handle,
|
||||
ctx context.Context, genericArgs runtime.Object, handle frameworktypes.Handle,
|
||||
) (frameworktypes.Plugin, error) {
|
||||
args, ok := genericArgs.(*HighNodeUtilizationArgs)
|
||||
if !ok {
|
||||
@@ -64,6 +65,7 @@ func NewHighNodeUtilization(
|
||||
genericArgs,
|
||||
)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", HighNodeUtilizationPluginName)
|
||||
|
||||
// this plugins worries only about thresholds but the nodeplugins
|
||||
// package was made to take two thresholds into account, one for low
|
||||
@@ -113,6 +115,7 @@ func NewHighNodeUtilization(
|
||||
)
|
||||
|
||||
return &HighNodeUtilization{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: args,
|
||||
resourceNames: resourceNames,
|
||||
@@ -135,6 +138,8 @@ func (h *HighNodeUtilization) Name() string {
|
||||
// utilized nodes. The goal here is to concentrate pods in fewer nodes so that
|
||||
// less nodes are used.
|
||||
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
logger := klog.FromContext(klog.NewContext(ctx, h.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
|
||||
|
||||
if err := h.usageClient.sync(ctx, nodes); err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error getting node usage: %v", err),
|
||||
@@ -165,7 +170,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
|
||||
// schedulable nodes.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
|
||||
klog.V(2).InfoS(
|
||||
logger.V(2).Info(
|
||||
"Node is unschedulable",
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
)
|
||||
@@ -184,7 +189,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
|
||||
category := []string{"underutilized", "overutilized"}
|
||||
for i := range nodeGroups {
|
||||
for nodeName := range nodeGroups[i] {
|
||||
klog.InfoS(
|
||||
logger.Info(
|
||||
"Node has been classified",
|
||||
"category", category[i],
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
@@ -208,18 +213,18 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
|
||||
|
||||
lowNodes, schedulableNodes := nodeInfos[0], nodeInfos[1]
|
||||
|
||||
klog.V(1).InfoS("Criteria for a node below target utilization", h.criteria...)
|
||||
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
logger.V(1).Info("Criteria for a node below target utilization", h.criteria...)
|
||||
logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
|
||||
if len(lowNodes) == 0 {
|
||||
klog.V(1).InfoS(
|
||||
logger.V(1).Info(
|
||||
"No node is underutilized, nothing to do here, you might tune your thresholds further",
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(lowNodes) <= h.args.NumberOfNodes {
|
||||
klog.V(1).InfoS(
|
||||
logger.V(1).Info(
|
||||
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
|
||||
"underutilizedNodes", len(lowNodes),
|
||||
"numberOfNodes", h.args.NumberOfNodes,
|
||||
@@ -228,12 +233,12 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
|
||||
}
|
||||
|
||||
if len(lowNodes) == len(nodes) {
|
||||
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
|
||||
logger.V(1).Info("All nodes are underutilized, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(schedulableNodes) == 0 {
|
||||
klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
|
||||
logger.V(1).Info("No node is available to schedule the pods, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
@@ -97,17 +96,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -115,8 +104,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
|
||||
@@ -168,8 +156,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
@@ -249,9 +236,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
@@ -466,9 +451,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
// pods in the other nodes must not be evicted
|
||||
// because they do not have the extended
|
||||
// resource defined in their requests.
|
||||
test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
@@ -523,6 +506,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
}
|
||||
|
||||
plugin, err := NewHighNodeUtilization(
|
||||
ctx,
|
||||
&HighNodeUtilizationArgs{
|
||||
Thresholds: testCase.thresholds,
|
||||
EvictionModes: testCase.evictionModes,
|
||||
@@ -637,7 +621,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
|
||||
plugin, err := NewHighNodeUtilization(ctx, &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
},
|
||||
|
||||
@@ -43,6 +43,7 @@ var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
|
||||
// nodes. Note that CPU/Memory requests are used to calculate nodes'
|
||||
// utilization and not the actual resource usage.
|
||||
type LowNodeUtilization struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *LowNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
@@ -57,7 +58,7 @@ type LowNodeUtilization struct {
|
||||
// handle. this plugin aims to move workload from overutilized nodes to
|
||||
// underutilized nodes.
|
||||
func NewLowNodeUtilization(
|
||||
genericArgs runtime.Object, handle frameworktypes.Handle,
|
||||
ctx context.Context, genericArgs runtime.Object, handle frameworktypes.Handle,
|
||||
) (frameworktypes.Plugin, error) {
|
||||
args, ok := genericArgs.(*LowNodeUtilizationArgs)
|
||||
if !ok {
|
||||
@@ -66,6 +67,7 @@ func NewLowNodeUtilization(
|
||||
genericArgs,
|
||||
)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", LowNodeUtilizationPluginName)
|
||||
|
||||
// resourceNames holds a list of resources for which the user has
|
||||
// provided thresholds for. extendedResourceNames holds those as well
|
||||
@@ -115,6 +117,7 @@ func NewLowNodeUtilization(
|
||||
}
|
||||
|
||||
return &LowNodeUtilization{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: args,
|
||||
underCriteria: thresholdsToKeysAndValues(args.Thresholds),
|
||||
@@ -135,6 +138,8 @@ func (l *LowNodeUtilization) Name() string {
|
||||
// utilized nodes to under utilized nodes. The goal here is to evenly
|
||||
// distribute pods across nodes.
|
||||
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
logger := klog.FromContext(klog.NewContext(ctx, l.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
|
||||
|
||||
if err := l.usageClient.sync(ctx, nodes); err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error getting node usage: %v", err),
|
||||
@@ -182,7 +187,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
|
||||
// underutilized but aren't schedulable are ignored.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
|
||||
klog.V(2).InfoS(
|
||||
logger.V(2).Info(
|
||||
"Node is unschedulable, thus not considered as underutilized",
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
)
|
||||
@@ -207,7 +212,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
|
||||
for nodeName := range nodeGroups[i] {
|
||||
classifiedNodes[nodeName] = true
|
||||
|
||||
klog.InfoS(
|
||||
logger.Info(
|
||||
"Node has been classified",
|
||||
"category", categories[i],
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
@@ -233,7 +238,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
|
||||
// log nodes that are appropriately utilized.
|
||||
for nodeName := range nodesMap {
|
||||
if !classifiedNodes[nodeName] {
|
||||
klog.InfoS(
|
||||
logger.Info(
|
||||
"Node is appropriately utilized",
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
"usage", nodesUsageMap[nodeName],
|
||||
@@ -245,20 +250,20 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
|
||||
lowNodes, highNodes := nodeInfos[0], nodeInfos[1]
|
||||
|
||||
// log messages for nodes with low and high utilization
|
||||
klog.V(1).InfoS("Criteria for a node under utilization", l.underCriteria...)
|
||||
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
klog.V(1).InfoS("Criteria for a node above target utilization", l.overCriteria...)
|
||||
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(highNodes))
|
||||
logger.V(1).Info("Criteria for a node under utilization", l.underCriteria...)
|
||||
logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
logger.V(1).Info("Criteria for a node above target utilization", l.overCriteria...)
|
||||
logger.V(1).Info("Number of overutilized nodes", "totalNumber", len(highNodes))
|
||||
|
||||
if len(lowNodes) == 0 {
|
||||
klog.V(1).InfoS(
|
||||
logger.V(1).Info(
|
||||
"No node is underutilized, nothing to do here, you might tune your thresholds further",
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(lowNodes) <= l.args.NumberOfNodes {
|
||||
klog.V(1).InfoS(
|
||||
logger.V(1).Info(
|
||||
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
|
||||
"underutilizedNodes", len(lowNodes),
|
||||
"numberOfNodes", l.args.NumberOfNodes,
|
||||
@@ -267,12 +272,12 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
|
||||
}
|
||||
|
||||
if len(lowNodes) == len(nodes) {
|
||||
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
|
||||
logger.V(1).Info("All nodes are underutilized, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(highNodes) == 0 {
|
||||
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
|
||||
logger.V(1).Info("All nodes are under target utilization, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
@@ -92,25 +91,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -155,25 +143,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -233,25 +210,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -310,17 +276,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -328,8 +284,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -379,17 +334,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -397,8 +342,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -462,17 +406,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodPriority(pod, lowPriority)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -480,8 +414,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -525,9 +458,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
@@ -537,23 +468,11 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetDSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -561,8 +480,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -632,17 +550,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -651,8 +559,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
@@ -747,12 +654,8 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 7)
|
||||
}),
|
||||
test.BuildTestPod("p3", 0, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p8", 0, 0, n3NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p3", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p8", 0, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p9", 0, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
@@ -795,17 +698,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -813,8 +706,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
@@ -872,17 +764,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -890,8 +772,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
@@ -975,17 +856,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -993,8 +864,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -1037,17 +907,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -1055,8 +915,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -1106,17 +965,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 375, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -1124,8 +973,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 3000, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -1218,17 +1066,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -1236,8 +1074,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -1282,25 +1119,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -1391,7 +1217,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
metricsUtilization = &MetricsUtilization{Source: api.KubernetesMetrics}
|
||||
}
|
||||
|
||||
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
|
||||
plugin, err := NewLowNodeUtilization(ctx, &LowNodeUtilizationArgs{
|
||||
Thresholds: tc.thresholds,
|
||||
TargetThresholds: tc.targetThresholds,
|
||||
UseDeviationThresholds: tc.useDeviationThresholds,
|
||||
@@ -1551,7 +1377,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
|
||||
plugin, err := NewLowNodeUtilization(ctx, &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
@@ -1575,17 +1401,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
func withLocalStorage(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}
|
||||
@@ -1594,8 +1410,7 @@ func withCriticalPod(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}
|
||||
|
||||
func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {
|
||||
@@ -1824,7 +1639,7 @@ func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {
|
||||
result: tc.samples,
|
||||
dataType: model.ValVector,
|
||||
}
|
||||
plugin, err := NewLowNodeUtilization(tc.args, handle)
|
||||
plugin, err := NewLowNodeUtilization(ctx, tc.args, handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
@@ -176,13 +176,14 @@ func evictPodsFromSourceNodes(
|
||||
usageClient usageClient,
|
||||
maxNoOfPodsToEvictPerNode *uint,
|
||||
) {
|
||||
logger := klog.FromContext(ctx)
|
||||
available, err := assessAvailableResourceInNodes(destinationNodes, resourceNames)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to assess available resources in nodes")
|
||||
logger.Error(err, "unable to assess available resources in nodes")
|
||||
return
|
||||
}
|
||||
|
||||
klog.V(1).InfoS("Total capacity to be moved", usageToKeysAndValues(available)...)
|
||||
logger.V(1).Info("Total capacity to be moved", usageToKeysAndValues(available)...)
|
||||
|
||||
destinationTaints := make(map[string][]v1.Taint, len(destinationNodes))
|
||||
for _, node := range destinationNodes {
|
||||
@@ -190,14 +191,14 @@ func evictPodsFromSourceNodes(
|
||||
}
|
||||
|
||||
for _, node := range sourceNodes {
|
||||
klog.V(3).InfoS(
|
||||
logger.V(3).Info(
|
||||
"Evicting pods from node",
|
||||
"node", klog.KObj(node.node),
|
||||
"usage", node.usage,
|
||||
)
|
||||
|
||||
nonRemovablePods, removablePods := classifyPods(node.allPods, podFilter)
|
||||
klog.V(2).InfoS(
|
||||
logger.V(2).Info(
|
||||
"Pods on node",
|
||||
"node", klog.KObj(node.node),
|
||||
"allPods", len(node.allPods),
|
||||
@@ -206,14 +207,14 @@ func evictPodsFromSourceNodes(
|
||||
)
|
||||
|
||||
if len(removablePods) == 0 {
|
||||
klog.V(1).InfoS(
|
||||
logger.V(1).Info(
|
||||
"No removable pods on node, try next node",
|
||||
"node", klog.KObj(node.node),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
klog.V(1).InfoS(
|
||||
logger.V(1).Info(
|
||||
"Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers",
|
||||
)
|
||||
|
||||
@@ -260,6 +261,7 @@ func evictPods(
|
||||
usageClient usageClient,
|
||||
maxNoOfPodsToEvictPerNode *uint,
|
||||
) error {
|
||||
logger := klog.FromContext(ctx)
|
||||
// preemptive check to see if we should continue evicting pods.
|
||||
if !continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
return nil
|
||||
@@ -274,7 +276,7 @@ func evictPods(
|
||||
var evictionCounter uint = 0
|
||||
for _, pod := range inputPods {
|
||||
if maxNoOfPodsToEvictPerNode != nil && evictionCounter >= *maxNoOfPodsToEvictPerNode {
|
||||
klog.V(3).InfoS(
|
||||
logger.V(3).Info(
|
||||
"Max number of evictions per node per plugin reached",
|
||||
"limit", *maxNoOfPodsToEvictPerNode,
|
||||
)
|
||||
@@ -282,7 +284,7 @@ func evictPods(
|
||||
}
|
||||
|
||||
if !utils.PodToleratesTaints(pod, destinationTaints) {
|
||||
klog.V(3).InfoS(
|
||||
logger.V(3).Info(
|
||||
"Skipping eviction for pod, doesn't tolerate node taint",
|
||||
"pod", klog.KObj(pod),
|
||||
)
|
||||
@@ -297,7 +299,7 @@ func evictPods(
|
||||
WithoutNamespaces(excludedNamespaces).
|
||||
BuildFilterFunc()
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "could not build preEvictionFilter with namespace exclusion")
|
||||
logger.Error(err, "could not build preEvictionFilter with namespace exclusion")
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -311,9 +313,8 @@ func evictPods(
|
||||
podUsage, err := usageClient.podUsage(pod)
|
||||
if err != nil {
|
||||
if _, ok := err.(*notSupportedError); !ok {
|
||||
klog.Errorf(
|
||||
"unable to get pod usage for %v/%v: %v",
|
||||
pod.Namespace, pod.Name, err,
|
||||
logger.Error(err,
|
||||
"unable to get pod usage", "pod", klog.KObj(pod),
|
||||
)
|
||||
continue
|
||||
}
|
||||
@@ -325,18 +326,18 @@ func evictPods(
|
||||
case *evictions.EvictionNodeLimitError, *evictions.EvictionTotalLimitError:
|
||||
return err
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
logger.Error(err, "eviction failed")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if maxNoOfPodsToEvictPerNode == nil && unconstrainedResourceEviction {
|
||||
klog.V(3).InfoS("Currently, only a single pod eviction is allowed")
|
||||
logger.V(3).Info("Currently, only a single pod eviction is allowed")
|
||||
break
|
||||
}
|
||||
|
||||
evictionCounter++
|
||||
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
|
||||
logger.V(3).Info("Evicted pods", "pod", klog.KObj(pod))
|
||||
if unconstrainedResourceEviction {
|
||||
continue
|
||||
}
|
||||
@@ -345,7 +346,7 @@ func evictPods(
|
||||
|
||||
keysAndValues := []any{"node", nodeInfo.node.Name}
|
||||
keysAndValues = append(keysAndValues, usageToKeysAndValues(nodeInfo.usage)...)
|
||||
klog.V(3).InfoS("Updated node usage", keysAndValues...)
|
||||
logger.V(3).Info("Updated node usage", keysAndValues...)
|
||||
|
||||
// make sure we should continue evicting pods.
|
||||
if !continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
|
||||
@@ -264,12 +264,13 @@ func (client *prometheusUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]
}

func NodeUsageFromPrometheusMetrics(ctx context.Context, promClient promapi.Client, promQuery string) (map[string]map[v1.ResourceName]*resource.Quantity, error) {
	logger := klog.FromContext(ctx)
	results, warnings, err := promv1.NewAPI(promClient).Query(ctx, promQuery, time.Now())
	if err != nil {
		return nil, fmt.Errorf("unable to capture prometheus metrics: %v", err)
	}
	if len(warnings) > 0 {
		klog.Infof("prometheus metrics warnings: %v", warnings)
		logger.Info("prometheus metrics warnings: %v", warnings)
	}

	if results.Type() != model.ValVector {
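For orientation, a hedged usage sketch of the `NodeUsageFromPrometheusMetrics` helper shown above. The Prometheus address and query are placeholders, and the `sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization` import path is assumed from the repository layout rather than taken from this diff:

```go
package main

import (
	"context"
	"fmt"

	promapi "github.com/prometheus/client_golang/api"
	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
)

func main() {
	// Placeholder endpoint; point this at a real Prometheus instance.
	client, err := promapi.NewClient(promapi.Config{Address: "http://prometheus.example:9090"})
	if err != nil {
		panic(err)
	}
	// Any vector-valued query keyed by node name works; this recording rule is just an example.
	usage, err := nodeutilization.NodeUsageFromPrometheusMetrics(context.Background(), client, `instance:node_cpu:rate:sum`)
	if err != nil {
		panic(err)
	}
	for node, resources := range usage {
		fmt.Println(node, resources[v1.ResourceCPU])
	}
}
```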
@@ -257,3 +257,138 @@ func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateHighNodeUtilizationPluginConfig(t *testing.T) {
|
||||
extendedResource := v1.ResourceName("example.com/foo")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args *HighNodeUtilizationArgs
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "valid configuration with CPU and memory",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 90,
|
||||
},
|
||||
EvictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid configuration with extended resource",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 85,
|
||||
extendedResource: 95,
|
||||
},
|
||||
EvictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "empty thresholds",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "no resource threshold is configured",
|
||||
},
|
||||
{
|
||||
name: "threshold below minimum (0%)",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: -1,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "cpu threshold not in [0, 100] range",
|
||||
},
|
||||
{
|
||||
name: "threshold above maximum (100%)",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceMemory: 101,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "memory threshold not in [0, 100] range",
|
||||
},
|
||||
{
|
||||
name: "multiple thresholds with one out of range",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 150,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "memory threshold not in [0, 100] range",
|
||||
},
|
||||
{
|
||||
name: "evictableNamespaces with Exclude (allowed)",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
EvictableNamespaces: &api.Namespaces{
|
||||
Exclude: []string{"ns1", "ns2"},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid eviction mode",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
EvictionModes: []EvictionMode{"InvalidMode"},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "invalid eviction mode InvalidMode",
|
||||
},
|
||||
{
|
||||
name: "missing eviction modes (nil) - should be allowed (treated as empty)",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
EvictionModes: nil,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "empty eviction modes slice - should be allowed",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
EvictionModes: []EvictionMode{},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := ValidateHighNodeUtilizationArgs(runtime.Object(tc.args))
|
||||
|
||||
if tc.wantErr {
|
||||
if err == nil {
|
||||
t.Fatalf("expected error, but got nil")
|
||||
}
|
||||
if tc.errMsg != "" && err.Error() != tc.errMsg {
|
||||
t.Errorf("expected error message: %q, but got: %q", tc.errMsg, err.Error())
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
pkg/framework/plugins/podlifetime/README.md | 155 (new file)
@@ -0,0 +1,155 @@
# PodLifeTime Plugin

## What It Does

The PodLifeTime plugin evicts pods that have been running for too long. You can configure a maximum age threshold, and the plugin evicts pods older than that threshold. The oldest pods are evicted first.

## How It Works

The plugin examines all pods across your nodes and selects those that exceed the configured age threshold. You can further narrow down which pods are considered by specifying:

- Which namespaces to include or exclude
- Which labels pods must have
- Which states pods must be in (e.g., Running, Pending, CrashLoopBackOff)

Once pods are selected, they are sorted by age (oldest first) and evicted in that order. Eviction stops when limits are reached (per-node limits, total limits, or Pod Disruption Budget constraints).

## Use Cases

- **Resource Leakage Mitigation**: Restart long-running pods that may have accumulated memory leaks, stale caches, or other resource leaks
  ```yaml
  args:
    maxPodLifeTimeSeconds: 604800 # 7 days
    states: [Running]
  ```

- **Ephemeral Workload Cleanup**: Remove long-running batch jobs, test pods, or temporary workloads that have exceeded their expected lifetime
  ```yaml
  args:
    maxPodLifeTimeSeconds: 7200 # 2 hours
    states: [Succeeded, Failed]
  ```

- **Node Hygiene**: Remove forgotten or stuck pods that are consuming resources but not making progress
  ```yaml
  args:
    maxPodLifeTimeSeconds: 3600 # 1 hour
    states: [CrashLoopBackOff, ImagePullBackOff, ErrImagePull]
    includingInitContainers: true
  ```

- **Config/Secret Update Pickup**: Force pod restart to pick up updated ConfigMaps, Secrets, or environment variables
  ```yaml
  args:
    maxPodLifeTimeSeconds: 86400 # 1 day
    states: [Running]
    labelSelector:
      matchLabels:
        config-refresh: enabled
  ```

- **Security Rotation**: Periodically refresh pods to pick up new security tokens, certificates, or patched container images
  ```yaml
  args:
    maxPodLifeTimeSeconds: 259200 # 3 days
    states: [Running]
    namespaces:
      exclude: [kube-system]
  ```

- **Dev/Test Environment Cleanup**: Automatically clean up old pods in development or staging namespaces
  ```yaml
  args:
    maxPodLifeTimeSeconds: 86400 # 1 day
    namespaces:
      include: [dev, staging, test]
  ```

- **Cluster Health Freshness**: Ensure pods periodically restart to maintain cluster health and verify workloads can recover from restarts
  ```yaml
  args:
    maxPodLifeTimeSeconds: 604800 # 7 days
    states: [Running]
    namespaces:
      exclude: [kube-system, production]
  ```

- **Rebalancing Assistance**: Work alongside other descheduler strategies by removing old pods to allow better pod distribution
  ```yaml
  args:
    maxPodLifeTimeSeconds: 1209600 # 14 days
    states: [Running]
  ```

- **Non-Critical Stateful Refresh**: Occasionally reset tolerable stateful workloads that can handle data loss or have external backup mechanisms
  ```yaml
  args:
    maxPodLifeTimeSeconds: 2592000 # 30 days
    labelSelector:
      matchLabels:
        stateful-tier: cache
  ```

## Configuration

| Parameter | Description | Type | Required | Default |
|-----------|-------------|------|----------|---------|
| `maxPodLifeTimeSeconds` | Pods older than this many seconds are evicted | `uint` | Yes | - |
| `namespaces` | Limit eviction to specific namespaces (or exclude specific namespaces) | `Namespaces` | No | `nil` |
| `labelSelector` | Only evict pods matching these labels | `metav1.LabelSelector` | No | `nil` |
| `states` | Only evict pods in specific states (e.g., Running, CrashLoopBackOff) | `[]string` | No | `nil` |
| `includingInitContainers` | When checking states, also check init container states | `bool` | No | `false` |
| `includingEphemeralContainers` | When checking states, also check ephemeral container states | `bool` | No | `false` |

### Discovering states

Each pod is checked in the following locations to discover its relevant state:

1. **Pod Phase** - The overall pod lifecycle phase:
   - `Running` - Pod is running on a node
   - `Pending` - Pod has been accepted but containers are not yet running
   - `Succeeded` - All containers terminated successfully
   - `Failed` - All containers terminated, at least one failed
   - `Unknown` - Pod state cannot be determined

2. **Pod Status Reason** - Why the pod is in its current state:
   - `NodeAffinity` - Pod cannot be scheduled due to node affinity rules
   - `NodeLost` - Node hosting the pod is lost
   - `Shutdown` - Pod terminated due to node shutdown
   - `UnexpectedAdmissionError` - Pod admission failed unexpectedly

3. **Container Waiting Reason** - Why containers are waiting to start:
   - `PodInitializing` - Pod is still initializing
   - `ContainerCreating` - Container is being created
   - `ImagePullBackOff` - Image pull is failing and backing off
   - `CrashLoopBackOff` - Container is crashing repeatedly
   - `CreateContainerConfigError` - Container configuration is invalid
   - `ErrImagePull` - Image cannot be pulled
   - `CreateContainerError` - Container creation failed
   - `InvalidImageName` - Image name is invalid

By default, only regular containers are checked. Enable `includingInitContainers` or `includingEphemeralContainers` to also check those container types.

## Example

```yaml
apiVersion: descheduler/v1alpha2
kind: DeschedulerPolicy
profiles:
  - name: default
    plugins:
      deschedule:
        enabled:
          - name: PodLifeTime
    pluginConfig:
      - name: PodLifeTime
        args:
          maxPodLifeTimeSeconds: 86400 # 1 day
          namespaces:
            include:
              - default
          states:
            - Running
```

This configuration evicts Running pods in the `default` namespace that are older than 1 day.
@@ -38,17 +38,19 @@ var _ frameworktypes.DeschedulePlugin = &PodLifeTime{}
|
||||
|
||||
// PodLifeTime evicts pods on the node that violate the max pod lifetime threshold
|
||||
type PodLifeTime struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *PodLifeTimeArgs
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
podLifeTimeArgs, ok := args.(*PodLifeTimeArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type PodLifeTimeArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if podLifeTimeArgs.Namespaces != nil {
|
||||
@@ -115,6 +117,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
}
|
||||
|
||||
return &PodLifeTime{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
podFilter: podFilter,
|
||||
args: podLifeTimeArgs,
|
||||
@@ -130,9 +133,9 @@ func (d *PodLifeTime) Name() string {
|
||||
func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
podsToEvict := make([]*v1.Pod, 0)
|
||||
nodeMap := make(map[string]*v1.Node, len(nodes))
|
||||
|
||||
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
|
||||
for _, node := range nodes {
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
logger.V(2).Info("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
// no pods evicted as error encountered retrieving evictable Pods
|
||||
@@ -161,7 +164,7 @@ loop:
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return nil
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
logger.Error(err, "eviction failed")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
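The same two-step logging setup recurs in every plugin touched by these commits: `New` now receives a context and stores a logger tagged with the plugin name, and each extension point re-derives a logger tagged with the extension point. A simplified sketch of that pattern; the types here are stand-ins, not the real framework interfaces:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

const pluginName = "ExamplePlugin"

type examplePlugin struct {
	logger klog.Logger
}

// New mirrors the updated plugin constructors: it receives a context and
// stores a logger scoped to the plugin name.
func New(ctx context.Context) *examplePlugin {
	return &examplePlugin{
		logger: klog.FromContext(ctx).WithValues("plugin", pluginName),
	}
}

// Deschedule re-derives the logger for this extension point so every log
// line carries both the plugin name and the extension point.
func (p *examplePlugin) Deschedule(ctx context.Context) {
	logger := klog.FromContext(klog.NewContext(ctx, p.logger)).WithValues("ExtensionPoint", "Deschedule")
	logger.V(2).Info("Processing node", "node", "n1")
}

func main() {
	ctx := klog.NewContext(context.Background(), klog.Background())
	New(ctx).Deschedule(ctx)
}
```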
File diff suppressed because it is too large
@@ -18,35 +18,39 @@ package podlifetime
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sort"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
// ValidatePodLifeTimeArgs validates PodLifeTime arguments
|
||||
func ValidatePodLifeTimeArgs(obj runtime.Object) error {
|
||||
args := obj.(*PodLifeTimeArgs)
|
||||
var allErrs []error
|
||||
if args.MaxPodLifeTimeSeconds == nil {
|
||||
return fmt.Errorf("MaxPodLifeTimeSeconds not set")
|
||||
allErrs = append(allErrs, fmt.Errorf("MaxPodLifeTimeSeconds not set"))
|
||||
}
|
||||
|
||||
// At most one of include/exclude can be set
|
||||
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
|
||||
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
||||
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
|
||||
}
|
||||
|
||||
if args.LabelSelector != nil {
|
||||
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
|
||||
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
|
||||
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
|
||||
}
|
||||
}
|
||||
podLifeTimeAllowedStates := sets.New(
|
||||
// Pod Status Phase
|
||||
string(v1.PodRunning),
|
||||
string(v1.PodPending),
|
||||
string(v1.PodSucceeded),
|
||||
string(v1.PodFailed),
|
||||
string(v1.PodUnknown),
|
||||
|
||||
// Pod Status Reasons
|
||||
@@ -70,8 +74,10 @@ func ValidatePodLifeTimeArgs(obj runtime.Object) error {
|
||||
)
|
||||
|
||||
if !podLifeTimeAllowedStates.HasAll(args.States...) {
|
||||
return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.UnsortedList())
|
||||
allowed := podLifeTimeAllowedStates.UnsortedList()
|
||||
sort.Strings(allowed)
|
||||
allErrs = append(allErrs, fmt.Errorf("states must be one of %v", allowed))
|
||||
}
|
||||
|
||||
return nil
|
||||
return utilerrors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package podlifetime
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -26,7 +27,7 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
args *PodLifeTimeArgs
|
||||
expectError bool
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
description: "valid arg, no errors",
|
||||
@@ -34,7 +35,13 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
|
||||
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
|
||||
States: []string{string(v1.PodRunning)},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "Pod Status Reasons Succeeded or Failed",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
|
||||
States: []string{string(v1.PodSucceeded), string(v1.PodFailed)},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod Status Reasons CrashLoopBackOff ",
|
||||
@@ -42,31 +49,41 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
|
||||
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
|
||||
States: []string{"CrashLoopBackOff"},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "nil MaxPodLifeTimeSeconds arg, expects errors",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: nil,
|
||||
},
|
||||
expectError: true,
|
||||
errInfo: fmt.Errorf("MaxPodLifeTimeSeconds not set"),
|
||||
},
|
||||
{
|
||||
description: "invalid pod state arg, expects errors",
|
||||
args: &PodLifeTimeArgs{
|
||||
States: []string{string(v1.NodeRunning)},
|
||||
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
|
||||
States: []string{string("InvalidState")},
|
||||
},
|
||||
expectError: true,
|
||||
errInfo: fmt.Errorf("states must be one of [ContainerCreating CrashLoopBackOff CreateContainerConfigError CreateContainerError ErrImagePull Failed ImagePullBackOff InvalidImageName NodeAffinity NodeLost Pending PodInitializing Running Shutdown Succeeded UnexpectedAdmissionError Unknown]"),
|
||||
},
|
||||
{
|
||||
description: "nil MaxPodLifeTimeSeconds arg and invalid pod state arg, expects errors",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: nil,
|
||||
States: []string{string("InvalidState")},
|
||||
},
|
||||
errInfo: fmt.Errorf("[MaxPodLifeTimeSeconds not set, states must be one of [ContainerCreating CrashLoopBackOff CreateContainerConfigError CreateContainerError ErrImagePull Failed ImagePullBackOff InvalidImageName NodeAffinity NodeLost Pending PodInitializing Running Shutdown Succeeded UnexpectedAdmissionError Unknown]]"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := ValidatePodLifeTimeArgs(tc.args)
|
||||
|
||||
hasError := err != nil
|
||||
if tc.expectError != hasError {
|
||||
t.Error("unexpected arg validation behavior")
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.description, func(t *testing.T) {
|
||||
validateErr := ValidatePodLifeTimeArgs(testCase.args)
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -45,6 +45,7 @@ const PluginName = "RemoveDuplicates"
|
||||
// As of now, this plugin won't evict daemonsets, mirror pods, critical pods and pods with local storages.
|
||||
|
||||
type RemoveDuplicates struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *RemoveDuplicatesArgs
|
||||
podFilter podutil.FilterFunc
|
||||
@@ -62,11 +63,12 @@ func (po podOwner) String() string {
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
removeDuplicatesArgs, ok := args.(*RemoveDuplicatesArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemoveDuplicatesArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if removeDuplicatesArgs.Namespaces != nil {
|
||||
@@ -85,6 +87,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
}
|
||||
|
||||
return &RemoveDuplicates{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: removeDuplicatesArgs,
|
||||
podFilter: podFilter,
|
||||
@@ -102,12 +105,13 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
|
||||
ownerKeyOccurence := make(map[podOwner]int32)
|
||||
nodeCount := 0
|
||||
nodeMap := make(map[string]*v1.Node)
|
||||
logger := klog.FromContext(klog.NewContext(ctx, r.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
|
||||
|
||||
for _, node := range nodes {
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
logger.V(2).Info("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, r.handle.GetPodsAssignedToNodeFunc(), r.podFilter)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
|
||||
logger.Error(err, "Error listing evictable pods on node", "node", klog.KObj(node))
|
||||
continue
|
||||
}
|
||||
nodeMap[node.Name] = node
|
||||
@@ -163,7 +167,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
|
||||
for _, keys := range existing {
|
||||
if reflect.DeepEqual(keys, podContainerKeys) {
|
||||
matched = true
|
||||
klog.V(3).InfoS("Duplicate found", "pod", klog.KObj(pod))
|
||||
logger.V(3).Info("Duplicate found", "pod", klog.KObj(pod))
|
||||
for _, ownerRef := range ownerRefList {
|
||||
ownerKey := podOwner{
|
||||
namespace: pod.ObjectMeta.Namespace,
|
||||
@@ -195,16 +199,16 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
|
||||
|
||||
targetNodes := getTargetNodes(podNodes, nodes)
|
||||
|
||||
klog.V(2).InfoS("Adjusting feasible nodes", "owner", ownerKey, "from", nodeCount, "to", len(targetNodes))
|
||||
logger.V(2).Info("Adjusting feasible nodes", "owner", ownerKey, "from", nodeCount, "to", len(targetNodes))
|
||||
if len(targetNodes) < 2 {
|
||||
klog.V(1).InfoS("Less than two feasible nodes for duplicates to land, skipping eviction", "owner", ownerKey)
|
||||
logger.V(1).Info("Less than two feasible nodes for duplicates to land, skipping eviction", "owner", ownerKey)
|
||||
continue
|
||||
}
|
||||
|
||||
upperAvg := int(math.Ceil(float64(ownerKeyOccurence[ownerKey]) / float64(len(targetNodes))))
|
||||
loop:
|
||||
for nodeName, pods := range podNodes {
|
||||
klog.V(2).InfoS("Average occurrence per node", "node", klog.KObj(nodeMap[nodeName]), "ownerKey", ownerKey, "avg", upperAvg)
|
||||
logger.V(2).Info("Average occurrence per node", "node", klog.KObj(nodeMap[nodeName]), "ownerKey", ownerKey, "avg", upperAvg)
|
||||
// list of duplicated pods does not contain the original referential pod
|
||||
if len(pods)+1 > upperAvg {
|
||||
// It's assumed all duplicated pods are in the same priority class
|
||||
@@ -220,7 +224,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return nil
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
logger.Error(err, "eviction failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -17,14 +17,16 @@ import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func ValidateRemoveDuplicatesArgs(obj runtime.Object) error {
	args := obj.(*RemoveDuplicatesArgs)
	var allErrs []error
	// At most one of include/exclude can be set
	if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
		allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
	}

	return nil
	return utilerrors.NewAggregate(allErrs)
}
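The validators in these commits switch from returning on the first problem to collecting everything into `utilerrors.NewAggregate`, which is what lets the new test cases expect several messages in one error. A small sketch of that behavior, with made-up argument names:

```go
package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// validateSketch collects every problem instead of bailing on the first one.
func validateSketch(includeSet, excludeSet bool, maxSeconds *uint) error {
	var allErrs []error
	if maxSeconds == nil {
		allErrs = append(allErrs, fmt.Errorf("MaxPodLifeTimeSeconds not set"))
	}
	if includeSet && excludeSet {
		allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
	}
	// NewAggregate returns nil for an empty list, so a valid config still yields nil.
	return utilerrors.NewAggregate(allErrs)
}

func main() {
	fmt.Println(validateSketch(true, true, nil))
	// Both problems are reported at once:
	// [MaxPodLifeTimeSeconds not set, only one of Include/Exclude namespaces can be set]
}
```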
@@ -1,6 +1,7 @@
|
||||
package removeduplicates
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -11,6 +12,7 @@ func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
|
||||
description string
|
||||
args *RemoveDuplicatesArgs
|
||||
expectError bool
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
description: "valid namespace args, no errors",
|
||||
@@ -20,7 +22,6 @@ func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
|
||||
Include: []string{"default"},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "invalid namespaces args, expects error",
|
||||
@@ -31,17 +32,19 @@ func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
|
||||
Exclude: []string{"kube-system"},
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
errInfo: fmt.Errorf("only one of Include/Exclude namespaces can be set"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := ValidateRemoveDuplicatesArgs(tc.args)
|
||||
|
||||
hasError := err != nil
|
||||
if tc.expectError != hasError {
|
||||
t.Error("unexpected arg validation behavior")
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.description, func(t *testing.T) {
|
||||
validateErr := ValidateRemoveDuplicatesArgs(testCase.args)
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -36,6 +36,7 @@ const PluginName = "RemoveFailedPods"
|
||||
|
||||
// RemoveFailedPods evicts pods in failed status phase that match the given args criteria
|
||||
type RemoveFailedPods struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *RemoveFailedPodsArgs
|
||||
podFilter podutil.FilterFunc
|
||||
@@ -44,11 +45,12 @@ type RemoveFailedPods struct {
|
||||
var _ frameworktypes.DeschedulePlugin = &RemoveFailedPods{}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
failedPodsArgs, ok := args.(*RemoveFailedPodsArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemoveFailedPodsArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if failedPodsArgs.Namespaces != nil {
|
||||
@@ -71,7 +73,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
|
||||
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
|
||||
if err := validateCanEvict(pod, failedPodsArgs); err != nil {
|
||||
klog.V(4).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
|
||||
logger.V(4).Info(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -79,6 +81,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
})
|
||||
|
||||
return &RemoveFailedPods{
logger: logger,
handle: handle,
podFilter: podFilter,
args: failedPodsArgs,
@@ -92,8 +95,9 @@ func (d *RemoveFailedPods) Name() string {

// Deschedule extension point implementation for the plugin
func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
logger.V(2).Info("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
@@ -114,7 +118,7 @@ func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *fr
case *evictions.EvictionTotalLimitError:
return nil
default:
klog.Errorf("eviction failed: %v", err)
logger.Error(err, "eviction failed")
}
}
}

@@ -362,7 +362,7 @@ func TestRemoveFailedPods(t *testing.T) {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}

plugin, err := New(&RemoveFailedPodsArgs{
plugin, err := New(ctx, &RemoveFailedPodsArgs{
Reasons: tc.args.Reasons,
ExitCodes: tc.args.ExitCodes,
MinPodLifetimeSeconds: tc.args.MinPodLifetimeSeconds,

@@ -18,21 +18,23 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// ValidateRemoveFailedPodsArgs validates RemoveFailedPods arguments
func ValidateRemoveFailedPodsArgs(obj runtime.Object) error {
args := obj.(*RemoveFailedPodsArgs)
var allErrs []error
// At most one of include/exclude can be set
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
}

if args.LabelSelector != nil {
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
}
}

return nil
return utilerrors.NewAggregate(allErrs)
}

@@ -1,6 +1,7 @@
package removefailedpods

import (
"fmt"
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -12,7 +13,7 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
testCases := []struct {
description string
args *RemoveFailedPodsArgs
expectError bool
errInfo error
}{
{
description: "valid namespace args, no errors",
@@ -24,7 +25,6 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
Reasons: []string{"ReasonDoesNotMatch"},
MinPodLifetimeSeconds: &oneHourPodLifetimeSeconds,
},
expectError: false,
},
{
description: "invalid namespaces args, expects error",
@@ -34,7 +34,7 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
Exclude: []string{"kube-system"},
},
},
expectError: true,
errInfo: fmt.Errorf(`only one of Include/Exclude namespaces can be set`),
},
{
description: "valid label selector args, no errors",
@@ -43,7 +43,6 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
},
},
expectError: false,
},
{
description: "invalid label selector args, expects errors",
@@ -56,16 +55,19 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
},
},
},
expectError: true,
errInfo: fmt.Errorf(`failed to get label selectors from strategy's params: [key: Invalid value: "": name part must be non-empty; name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'), values: Invalid value: null: for 'in', 'notin' operators, values set can't be empty]`),
},
}

for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemoveFailedPodsArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
validateErr := ValidateRemoveFailedPodsArgs(testCase.args)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
})
}
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -19,6 +19,7 @@ package removepodshavingtoomanyrestarts
import (
"context"
"fmt"
"sort"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -36,6 +37,7 @@ const PluginName = "RemovePodsHavingTooManyRestarts"
// There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
type RemovePodsHavingTooManyRestarts struct {
logger klog.Logger
handle frameworktypes.Handle
args *RemovePodsHavingTooManyRestartsArgs
podFilter podutil.FilterFunc
@@ -44,11 +46,12 @@ type RemovePodsHavingTooManyRestarts struct {
var _ frameworktypes.DeschedulePlugin = &RemovePodsHavingTooManyRestarts{}

// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
tooManyRestartsArgs, ok := args.(*RemovePodsHavingTooManyRestartsArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type RemovePodsHavingTooManyRestartsArgs, got %T", args)
}
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)

var includedNamespaces, excludedNamespaces sets.Set[string]
if tooManyRestartsArgs.Namespaces != nil {
@@ -69,7 +72,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug

podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
if err := validateCanEvict(pod, tooManyRestartsArgs); err != nil {
klog.V(4).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
logger.V(4).Info(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
return false
}
return true
@@ -99,6 +102,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
}

return &RemovePodsHavingTooManyRestarts{
logger: logger,
handle: handle,
args: tooManyRestartsArgs,
podFilter: podFilter,
@@ -112,8 +116,9 @@ func (d *RemovePodsHavingTooManyRestarts) Name() string {

// Deschedule extension point implementation for the plugin
func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
logger.V(2).Info("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
@@ -121,6 +126,15 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
Err: fmt.Errorf("error listing pods on a node: %v", err),
}
}

podRestarts := make(map[*v1.Pod]int32)
for _, pod := range pods {
podRestarts[pod] = getPodTotalRestarts(pod, d.args.IncludingInitContainers)
}
// sort pods by restarts count
sort.Slice(pods, func(i, j int) bool {
return podRestarts[pods[i]] > podRestarts[pods[j]]
})
totalPods := len(pods)
loop:
for i := 0; i < totalPods; i++ {
@@ -134,7 +148,7 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
case *evictions.EvictionTotalLimitError:
return nil
default:
klog.Errorf("eviction failed: %v", err)
logger.Error(err, "eviction failed")
}
}
}
@@ -145,11 +159,7 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
func validateCanEvict(pod *v1.Pod, tooManyRestartsArgs *RemovePodsHavingTooManyRestartsArgs) error {
var err error

restarts := calcContainerRestartsFromStatuses(pod.Status.ContainerStatuses)
if tooManyRestartsArgs.IncludingInitContainers {
restarts += calcContainerRestartsFromStatuses(pod.Status.InitContainerStatuses)
}

restarts := getPodTotalRestarts(pod, tooManyRestartsArgs.IncludingInitContainers)
if restarts < tooManyRestartsArgs.PodRestartThreshold {
err = fmt.Errorf("number of container restarts (%v) not exceeding the threshold", restarts)
}
@@ -165,3 +175,12 @@ func calcContainerRestartsFromStatuses(statuses []v1.ContainerStatus) int32 {
}
return restarts
}

// getPodTotalRestarts get total restarts of a pod.
func getPodTotalRestarts(pod *v1.Pod, includeInitContainers bool) int32 {
restarts := calcContainerRestartsFromStatuses(pod.Status.ContainerStatuses)
if includeInitContainers {
restarts += calcContainerRestartsFromStatuses(pod.Status.InitContainerStatuses)
}
return restarts
}

@@ -22,7 +22,6 @@ import (
"testing"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -33,73 +32,93 @@ import (
"sigs.k8s.io/descheduler/test"
)

func initPods(node *v1.Node) []*v1.Pod {
pods := make([]*v1.Pod, 0)
const (
nodeName1 = "node1"
nodeName2 = "node2"
nodeName3 = "node3"
nodeName4 = "node4"
nodeName5 = "node5"
)

for i := int32(0); i <= 9; i++ {
pod := test.BuildTestPod(fmt.Sprintf("pod-%d", i), 100, 0, node.Name, nil)
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
func buildTestNode(nodeName string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(nodeName, 2000, 3000, 10, apply)
}

// pod at index i will have 25 * i restarts.
pod.Status = v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 5 * i,
},
func setPodContainerStatusRestartCount(pod *v1.Pod, base int32) {
pod.Status = v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 5 * base,
},
ContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 10 * i,
},
{
RestartCount: 10 * i,
},
},
ContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 10 * base,
},
}
pods = append(pods, pod)
}

// The following 3 pods won't get evicted.
// A daemonset.
pods[6].ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
pods[7].ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pods[7].Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
{
RestartCount: 10 * base,
},
},
}
// A Mirror Pod.
pods[8].Annotations = test.GetMirrorPodAnnotation()
}

func initPodContainersWithStatusRestartCount(name string, base int32, apply func(pod *v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName1, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
// pod at index i will have 25 * i restarts, 5 for init container, 20 for other two containers
setPodContainerStatusRestartCount(pod, base)
if apply != nil {
apply(pod)
}
})
}

func initPods(apply func(pod *v1.Pod)) []*v1.Pod {
pods := make([]*v1.Pod, 0)

for i := int32(0); i <= 9; i++ {
switch i {
default:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, apply))
// The following 3 pods won't get evicted.
// A daemonset.
case 6:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
test.SetDSOwnerRef(pod)
if apply != nil {
apply(pod)
}
}))
// A pod with local storage.
case 7:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
test.SetHostPathEmptyDirVolumeSource(pod)
if apply != nil {
apply(pod)
}
}))
// A Mirror Pod.
case 8:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
if apply != nil {
apply(pod)
}
}))
}
}

pods = append(
pods,
test.BuildTestPod("CPU-consumer-1", 150, 100, nodeName4, test.SetNormalOwnerRef),
test.BuildTestPod("CPU-consumer-2", 150, 100, nodeName5, test.SetNormalOwnerRef),
)

return pods
}

func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
node1 := test.BuildTestNode("node1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("node2", 2000, 3000, 10, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
})
node3 := test.BuildTestNode("node3", 2000, 3000, 10, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
})
node4 := test.BuildTestNode("node4", 200, 3000, 10, nil)
node5 := test.BuildTestNode("node5", 2000, 3000, 10, nil)

createRemovePodsHavingTooManyRestartsAgrs := func(
podRestartThresholds int32,
includingInitContainers bool,
@@ -114,207 +133,261 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {

tests := []struct {
description string
pods []*v1.Pod
nodes []*v1.Node
args RemovePodsHavingTooManyRestartsArgs
expectedEvictedPodCount uint
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
nodeFit bool
applyFunc func([]*v1.Pod)
}{
{
description: "All pods have total restarts under threshold, no pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(10000, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts under threshold, no pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(10000, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Some pods have total restarts bigger than threshold",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "Some pods have total restarts bigger than threshold",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, true),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, false),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 5,
},
{
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, false),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*25+1, true),
nodes: []*v1.Node{node1},
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*25+1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 1,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*20+1, false),
nodes: []*v1.Node{node1},
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*20+1, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 1,
nodeFit: false,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxNoOfPodsToEvictPerNamespace: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node2},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
}),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node3},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName3, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
}),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node4},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
test.BuildTestNode(nodeName4, 200, 3000, 10, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node5},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "pods are in CrashLoopBackOff with states=CrashLoopBackOff, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
nodes: []*v1.Node{node1, node5},
description: "pods are in CrashLoopBackOff with states=CrashLoopBackOff, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
pods: initPods(func(pod *v1.Pod) {
if len(pod.Status.ContainerStatuses) > 0 {
pod.Status.ContainerStatuses[0].State = v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
}
}
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
if len(pod.Status.ContainerStatuses) > 0 {
pod.Status.ContainerStatuses[0].State = v1.ContainerState{
},
{
description: "pods without CrashLoopBackOff with states=CrashLoopBackOff, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "pods running with state=Running, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
pods: initPods(func(pod *v1.Pod) {
pod.Status.Phase = v1.PodRunning
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
{
description: "pods pending with state=Running, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
pods: initPods(func(pod *v1.Pod) {
pod.Status.Phase = v1.PodPending
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
},
{
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=true), 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: true},
pods: initPods(func(pod *v1.Pod) {
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
}
}
},
},
}
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
},
{
description: "pods without CrashLoopBackOff with states=CrashLoopBackOff, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
nodes: []*v1.Node{node1, node5},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "pods running with state=Running, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
pod.Status.Phase = v1.PodRunning
}
},
},
{
description: "pods pending with state=Running, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
nodes: []*v1.Node{node1},
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=false), 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: false},
pods: initPods(func(pod *v1.Pod) {
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
},
},
}
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
pod.Status.Phase = v1.PodPending
}
},
},
{
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=true), 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: true},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
},
},
}
}
},
},
{
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=false), 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: false},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
},
},
}
}
},
},
}

for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
pods := append(
initPods(node1),
test.BuildTestPod("CPU-consumer-1", 150, 100, node4.Name, test.SetNormalOwnerRef),
test.BuildTestPod("CPU-consumer-2", 150, 100, node5.Name, test.SetNormalOwnerRef),
)
if tc.applyFunc != nil {
tc.applyFunc(pods)
}

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

@@ -322,7 +395,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range pods {
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
@@ -341,6 +414,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
}

plugin, err := New(
ctx,
&tc.args,
handle)
if err != nil {

@@ -15,29 +15,32 @@ package removepodshavingtoomanyrestarts

import (
"fmt"
"sort"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
)

// ValidateRemovePodsHavingTooManyRestartsArgs validates RemovePodsHavingTooManyRestarts arguments
func ValidateRemovePodsHavingTooManyRestartsArgs(obj runtime.Object) error {
args := obj.(*RemovePodsHavingTooManyRestartsArgs)
var allErrs []error
// At most one of include/exclude can be set
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
}

if args.LabelSelector != nil {
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
}
}

if args.PodRestartThreshold < 1 {
return fmt.Errorf("invalid PodsHavingTooManyRestarts threshold")
allErrs = append(allErrs, fmt.Errorf("invalid PodsHavingTooManyRestarts threshold"))
}

allowedStates := sets.New(
@@ -49,8 +52,10 @@ func ValidateRemovePodsHavingTooManyRestartsArgs(obj runtime.Object) error {
)

if !allowedStates.HasAll(args.States...) {
return fmt.Errorf("states must be one of %v", allowedStates.UnsortedList())
allowed := allowedStates.UnsortedList()
sort.Strings(allowed)
allErrs = append(allErrs, fmt.Errorf("states must be one of %v", allowed))
}

return nil
return utilerrors.NewAggregate(allErrs)
}

@@ -17,6 +17,7 @@ limitations under the License.
package removepodshavingtoomanyrestarts

import (
"fmt"
"testing"

v1 "k8s.io/api/core/v1"
@@ -26,7 +27,7 @@ func TestValidateRemovePodsHavingTooManyRestartsArgs(t *testing.T) {
testCases := []struct {
description string
args *RemovePodsHavingTooManyRestartsArgs
expectError bool
errInfo error
}{
{
description: "valid arg, no errors",
@@ -34,14 +35,13 @@ func TestValidateRemovePodsHavingTooManyRestartsArgs(t *testing.T) {
PodRestartThreshold: 1,
States: []string{string(v1.PodRunning)},
},
expectError: false,
},
{
description: "invalid PodRestartThreshold arg, expects errors",
args: &RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 0,
},
expectError: true,
errInfo: fmt.Errorf(`invalid PodsHavingTooManyRestarts threshold`),
},
{
description: "invalid States arg, expects errors",
@@ -49,7 +49,7 @@ func TestValidateRemovePodsHavingTooManyRestartsArgs(t *testing.T) {
PodRestartThreshold: 1,
States: []string{string(v1.PodFailed)},
},
expectError: true,
errInfo: fmt.Errorf(`states must be one of [CrashLoopBackOff Running]`),
},
{
description: "allows CrashLoopBackOff state",
@@ -57,17 +57,26 @@ func TestValidateRemovePodsHavingTooManyRestartsArgs(t *testing.T) {
PodRestartThreshold: 1,
States: []string{"CrashLoopBackOff"},
},
expectError: false,
},
{
description: "invalid PodRestartThreshold arg and invalid States arg, expects errors",
args: &RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 0,
States: []string{string(v1.PodFailed)},
},
errInfo: fmt.Errorf(`[invalid PodsHavingTooManyRestarts threshold, states must be one of [CrashLoopBackOff Running]]`),
},
}

for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemovePodsHavingTooManyRestartsArgs(tc.args)

hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
validateErr := ValidateRemovePodsHavingTooManyRestartsArgs(testCase.args)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
})
}
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -35,6 +35,7 @@ const PluginName = "RemovePodsViolatingInterPodAntiAffinity"
|
||||
|
||||
// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which violate inter pod anti affinity
|
||||
type RemovePodsViolatingInterPodAntiAffinity struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *RemovePodsViolatingInterPodAntiAffinityArgs
|
||||
podFilter podutil.FilterFunc
|
||||
@@ -43,11 +44,12 @@ type RemovePodsViolatingInterPodAntiAffinity struct {
|
||||
var _ frameworktypes.DeschedulePlugin = &RemovePodsViolatingInterPodAntiAffinity{}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
interPodAntiAffinityArgs, ok := args.(*RemovePodsViolatingInterPodAntiAffinityArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingInterPodAntiAffinityArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if interPodAntiAffinityArgs.Namespaces != nil {
|
||||
@@ -65,6 +67,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
}
|
||||
|
||||
return &RemovePodsViolatingInterPodAntiAffinity{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
podFilter: podFilter,
|
||||
args: interPodAntiAffinityArgs,
|
||||
@@ -77,6 +80,7 @@ func (d *RemovePodsViolatingInterPodAntiAffinity) Name() string {
|
||||
}
|
||||
|
||||
func (d *RemovePodsViolatingInterPodAntiAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
|
||||
pods, err := podutil.ListPodsOnNodes(nodes, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
return &frameworktypes.Status{
|
||||
@@ -90,7 +94,7 @@ func (d *RemovePodsViolatingInterPodAntiAffinity) Deschedule(ctx context.Context
|
||||
|
||||
loop:
|
||||
for _, node := range nodes {
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
logger.V(2).Info("Processing node", "node", klog.KObj(node))
|
||||
pods := podsOnANode[node.Name]
|
||||
// sort the evict-able Pods based on priority, if there are multiple pods with same priority, they are sorted based on QoS tiers.
|
||||
podutil.SortPodsBasedOnPriorityLowToHigh(pods)
|
||||
@@ -115,7 +119,7 @@ loop:
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return nil
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
logger.Error(err, "eviction failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,84 +33,75 @@ import (
"sigs.k8s.io/descheduler/test"
)

func TestPodAntiAffinity(t *testing.T) {
node1 := test.BuildTestNode("n1", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"region": "main-region",
}
})
node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
})
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
})
node4 := test.BuildTestNode("n4", 2, 2, 1, nil)
node5 := test.BuildTestNode("n5", 200, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"region": "main-region",
}
})
const (
nodeName1 = "n1"
nodeName2 = "n2"
nodeName3 = "n3"
nodeName4 = "n4"
nodeName5 = "n5"
)

p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
p11 := test.BuildTestPod("p11", 100, 0, node5.Name, nil)
p9.DeletionTimestamp = &metav1.Time{}
p10.DeletionTimestamp = &metav1.Time{}
func buildTestNode(name string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(name, 2000, 3000, 10, apply)
}

criticalPriority := utils.SystemCriticalPriority
nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node1.Name, func(pod *v1.Pod) {
pod.Spec.Priority = &criticalPriority
})
p2.Labels = map[string]string{"foo": "bar"}
p5.Labels = map[string]string{"foo": "bar"}
p6.Labels = map[string]string{"foo": "bar"}
p7.Labels = map[string]string{"foo1": "bar1"}
p11.Labels = map[string]string{"foo": "bar"}
nonEvictablePod.Labels = map[string]string{"foo": "bar"}
test.SetNormalOwnerRef(p1)
test.SetNormalOwnerRef(p2)
test.SetNormalOwnerRef(p3)
test.SetNormalOwnerRef(p4)
test.SetNormalOwnerRef(p5)
test.SetNormalOwnerRef(p6)
test.SetNormalOwnerRef(p7)
test.SetNormalOwnerRef(p9)
test.SetNormalOwnerRef(p10)
test.SetNormalOwnerRef(p11)

// set pod anti affinity
test.SetPodAntiAffinity(p1, "foo", "bar")
test.SetPodAntiAffinity(p3, "foo", "bar")
test.SetPodAntiAffinity(p4, "foo", "bar")
test.SetPodAntiAffinity(p5, "foo1", "bar1")
test.SetPodAntiAffinity(p6, "foo1", "bar1")
test.SetPodAntiAffinity(p7, "foo", "bar")
test.SetPodAntiAffinity(p9, "foo", "bar")
test.SetPodAntiAffinity(p10, "foo", "bar")

// set pod priority
test.SetPodPriority(p5, 100)
test.SetPodPriority(p6, 50)
test.SetPodPriority(p7, 0)

// Set pod node selectors
p8.Spec.NodeSelector = map[string]string{
"datacenter": "west",
func setNodeMainRegionLabel(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"region": "main-region",
}
}

func buildTestNode1() *v1.Node {
return buildTestNode(nodeName1, setNodeMainRegionLabel)
}

func buildTestPod(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName, apply)
}

func buildTestPodForNode1(name string, apply func(*v1.Pod)) *v1.Pod {
return buildTestPod(name, nodeName1, apply)
}

func setPodAntiAffinityFooBar(pod *v1.Pod) {
test.SetPodAntiAffinity(pod, "foo", "bar")
}

func setPodAntiAffinityFoo1Bar1(pod *v1.Pod) {
test.SetPodAntiAffinity(pod, "foo1", "bar1")
}

func setLabelsFooBar(pod *v1.Pod) {
pod.Labels = map[string]string{"foo": "bar"}
}

func setLabelsFoo1Bar1(pod *v1.Pod) {
pod.Labels = map[string]string{"foo1": "bar1"}
}

func buildTestPodWithAntiAffinityForNode1(name string) *v1.Pod {
return buildTestPodForNode1(name, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setPodAntiAffinityFooBar(pod)
})
}

func buildTestPodP2ForNode1() *v1.Pod {
return buildTestPodForNode1("p2", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
})
}

func buildTestPodNonEvictableForNode1() *v1.Pod {
criticalPriority := utils.SystemCriticalPriority
return buildTestPodForNode1("non-evict", func(pod *v1.Pod) {
test.SetPodPriority(pod, criticalPriority)
setLabelsFooBar(pod)
})
}

func TestPodAntiAffinity(t *testing.T) {
var uint1 uint = 1
var uint3 uint = 3

@@ -125,87 +116,204 @@ func TestPodAntiAffinity(t *testing.T) {
nodes []*v1.Node
}{
{
description: "Maximum pods to evict - 0",
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
description: "Maximum pods to evict - 0",
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict - 3",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
description: "Maximum pods to evict - 3",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict (maxPodsToEvictPerNamespace=3) - 3",
maxNoOfPodsToEvictPerNamespace: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict (maxNoOfPodsToEvictTotal)",
maxNoOfPodsToEvictPerNamespace: &uint3,
maxNoOfPodsToEvictTotal: &uint1,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Evict only 1 pod after sorting",
pods: []*v1.Pod{p5, p6, p7},
nodes: []*v1.Node{node1},
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p1, nonEvictablePod},
nodes: []*v1.Node{node1},
description: "Evict only 1 pod after sorting",
pods: []*v1.Pod{
buildTestPodForNode1("p5", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
setPodAntiAffinityFoo1Bar1(pod)
test.SetPodPriority(pod, 100)
}),
buildTestPodForNode1("p6", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
setPodAntiAffinityFoo1Bar1(pod)
test.SetPodPriority(pod, 50)
}),
buildTestPodForNode1("p7", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFoo1Bar1(pod)
setPodAntiAffinityFooBar(pod)
test.SetPodPriority(pod, 0)
}),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p1, nonEvictablePod},
nodes: []*v1.Node{node1},
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Won't evict pods because node selectors don't match available nodes",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p8, nonEvictablePod},
nodes: []*v1.Node{node1, node2},
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Won't evict pods because node selectors don't match available nodes",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodForNode1("p8", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
buildTestNode(nodeName2, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
}),
},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "Won't evict pods because only other node is not schedulable",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p8, nonEvictablePod},
nodes: []*v1.Node{node1, node3},
description: "Won't evict pods because only other node is not schedulable",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodForNode1("p8", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
buildTestNode(nodeName3, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
}),
},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "No pod to evicted since all pod terminating",
pods: []*v1.Pod{p9, p10},
nodes: []*v1.Node{node1},
description: "No pod to evicted since all pod terminating",
pods: []*v1.Pod{
buildTestPodForNode1("p9", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setPodAntiAffinityFooBar(pod)
pod.DeletionTimestamp = &metav1.Time{}
}),
buildTestPodForNode1("p10", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setPodAntiAffinityFooBar(pod)
pod.DeletionTimestamp = &metav1.Time{}
}),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 0,
},
{
description: "Won't evict pods because only other node doesn't have enough resources",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1, node4},
description: "Won't evict pods because only other node doesn't have enough resources",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
test.BuildTestNode(nodeName4, 2, 2, 1, nil),
},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "Evict pod violating anti-affinity among different node (all pods have anti-affinity)",
pods: []*v1.Pod{p1, p11},
nodes: []*v1.Node{node1, node5},
description: "Evict pod violating anti-affinity among different node (all pods have anti-affinity)",
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPod("p11", nodeName5, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
}),
},
nodes: []*v1.Node{
buildTestNode1(),
test.BuildTestNode(nodeName5, 200, 3000, 10, setNodeMainRegionLabel),
},
expectedEvictedPodCount: 1,
nodeFit: false,
},
@@ -240,6 +348,7 @@ func TestPodAntiAffinity(t *testing.T) {
}

plugin, err := New(
ctx,
&RemovePodsViolatingInterPodAntiAffinityArgs{},
handle,
)

@@ -18,21 +18,23 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// ValidateRemovePodsViolatingInterPodAntiAffinityArgs validates ValidateRemovePodsViolatingInterPodAntiAffinity arguments
func ValidateRemovePodsViolatingInterPodAntiAffinityArgs(obj runtime.Object) error {
args := obj.(*RemovePodsViolatingInterPodAntiAffinityArgs)
var allErrs []error
// At most one of include/exclude can be set
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
}

if args.LabelSelector != nil {
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
}
}

return nil
return utilerrors.NewAggregate(allErrs)
}

@@ -1,6 +1,7 @@
package removepodsviolatinginterpodantiaffinity

import (
"fmt"
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -11,7 +12,7 @@ func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
testCases := []struct {
description string
args *RemovePodsViolatingInterPodAntiAffinityArgs
expectError bool
errInfo error
}{
{
description: "valid namespace args, no errors",
@@ -20,7 +21,6 @@ func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
Include: []string{"default"},
},
},
expectError: false,
},
{
description: "invalid namespaces args, expects error",
@@ -30,7 +30,7 @@ func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
Exclude: []string{"kube-system"},
},
},
expectError: true,
errInfo: fmt.Errorf(`only one of Include/Exclude namespaces can be set`),
},
{
description: "valid label selector args, no errors",
@@ -39,7 +39,6 @@ func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
},
},
expectError: false,
},
{
description: "invalid label selector args, expects errors",
@@ -52,16 +51,19 @@ func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
},
},
},
expectError: true,
errInfo: fmt.Errorf(`failed to get label selectors from strategy's params: [key: Invalid value: "": name part must be non-empty; name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'), values: Invalid value: null: for 'in', 'notin' operators, values set can't be empty]`),
},
}

for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemovePodsViolatingInterPodAntiAffinityArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
validateErr := ValidateRemovePodsViolatingInterPodAntiAffinityArgs(testCase.args)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
})
}
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -33,6 +33,7 @@ const PluginName = "RemovePodsViolatingNodeAffinity"

// RemovePodsViolatingNodeAffinity evicts pods on the node which violate node affinity
type RemovePodsViolatingNodeAffinity struct {
logger klog.Logger
handle frameworktypes.Handle
args *RemovePodsViolatingNodeAffinityArgs
podFilter podutil.FilterFunc
@@ -41,11 +42,12 @@ type RemovePodsViolatingNodeAffinity struct {
var _ frameworktypes.DeschedulePlugin = &RemovePodsViolatingNodeAffinity{}

// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
nodeAffinityArgs, ok := args.(*RemovePodsViolatingNodeAffinityArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeAffinityArgs, got %T", args)
}
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)

var includedNamespaces, excludedNamespaces sets.Set[string]
if nodeAffinityArgs.Namespaces != nil {
@@ -65,6 +67,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
}

return &RemovePodsViolatingNodeAffinity{
logger: logger,
handle: handle,
podFilter: podFilter,
args: nodeAffinityArgs,
@@ -77,8 +80,9 @@ func (d *RemovePodsViolatingNodeAffinity) Name() string {
}

func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
for _, nodeAffinity := range d.args.NodeAffinityType {
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
logger.V(2).Info("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
var err *frameworktypes.Status = nil

// The pods that we'll evict must be evictable. For example, the current number of replicas
@@ -106,7 +110,7 @@ func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes
}
err = d.processNodes(ctx, nodes, filterFunc)
default:
klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
logger.Error(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
}

if err != nil {
@@ -118,7 +122,7 @@ func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes

func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, nodes []*v1.Node, filterFunc func(*v1.Pod, *v1.Node, []*v1.Node) bool) *frameworktypes.Status {
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
d.logger.V(2).Info("Processing node", "node", klog.KObj(node))

// Potentially evictable pods
pods, err := podutil.ListPodsOnANode(
@@ -136,7 +140,7 @@ func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, node

loop:
for _, pod := range pods {
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
d.logger.V(1).Info("Evicting pod", "pod", klog.KObj(pod))
err := d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
if err == nil {
continue
@@ -147,7 +151,7 @@ func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, node
case *evictions.EvictionTotalLimitError:
return nil
default:
klog.Errorf("eviction failed: %v", err)
d.logger.Error(err, "eviction failed")
}
}
}

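The plugin changes above replace package-level klog calls with a logger that is captured from the context at construction time and re-scoped per extension point. A small sketch of that pattern using only k8s.io/klog/v2; the struct and method names here are illustrative stand-ins, not the descheduler's own:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

type examplePlugin struct {
	logger klog.Logger
}

// newExamplePlugin captures a plugin-scoped logger from the caller's context.
func newExamplePlugin(ctx context.Context) *examplePlugin {
	return &examplePlugin{
		logger: klog.FromContext(ctx).WithValues("plugin", "ExamplePlugin"),
	}
}

// run re-scopes the stored logger for one extension point, so every line it
// emits carries both the plugin name and the extension point.
func (p *examplePlugin) run(ctx context.Context) {
	logger := klog.FromContext(klog.NewContext(ctx, p.logger)).WithValues("ExtensionPoint", "Deschedule")
	logger.V(2).Info("Executing", "step", "example")
	logger.Error(nil, "Invalid configuration example", "field", "NodeAffinityType")
}

func main() {
	p := newExamplePlugin(context.Background())
	p.run(context.Background())
}
```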
@@ -32,73 +32,91 @@ import (
"sigs.k8s.io/descheduler/test"
)

const (
nodeWithLabelsName = "nodeWithLabels"
nodeWithoutLabelsName = "nodeWithoutLabels"
unschedulableNodeWithLabelsName = "unschedulableNodeWithLabels"
nodeLabelKey = "kubernetes.io/desiredNode"
nodeLabelValue = "yes"
)

func buildTestNode(name string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(name, 2000, 3000, 10, apply)
}

func setNodeDesiredNodeLabel(node *v1.Node) {
node.Labels[nodeLabelKey] = nodeLabelValue
}

func buildTestPod(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName, apply)
}

func buildUnschedulableNodeWithLabels() *v1.Node {
return buildTestNode(unschedulableNodeWithLabelsName, func(node *v1.Node) {
setNodeDesiredNodeLabel(node)
node.Spec.Unschedulable = true
})
}

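These helpers follow an apply-callback builder style: each one builds a base object and then lets an optional mutator customize it. A hedged, self-contained sketch of how such helpers compose, using a stand-in builder rather than the descheduler's shared test package:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildNode is a stand-in for a shared test builder: it creates a minimal node
// and then lets an optional apply callback customize it.
func buildNode(name string, apply func(*v1.Node)) *v1.Node {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{}},
	}
	if apply != nil {
		apply(node)
	}
	return node
}

func main() {
	// A plain node and a labeled, unschedulable node built from the same helper.
	plain := buildNode("nodeWithoutLabels", nil)
	special := buildNode("unschedulableNodeWithLabels", func(n *v1.Node) {
		n.Labels["kubernetes.io/desiredNode"] = "yes"
		n.Spec.Unschedulable = true
	})
	fmt.Println(plain.Name, special.Labels, special.Spec.Unschedulable)
}
```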
func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
nodeLabelKey := "kubernetes.io/desiredNode"
nodeLabelValue := "yes"
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10, nil)
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
addPodsToNode := func(nodeName string, deletionTimestamp *metav1.Time, affinityType string) []*v1.Pod {
podWithNodeAffinity := buildTestPod("podWithNodeAffinity", nodeName, func(pod *v1.Pod) {
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{},
}

nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10, nil)

unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10, nil)
unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
unschedulableNodeWithLabels.Spec.Unschedulable = true

addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time, affinityType string) []*v1.Pod {
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{},
}

switch affinityType {
case "requiredDuringSchedulingIgnoredDuringExecution":
podWithNodeAffinity.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
switch affinityType {
case "requiredDuringSchedulingIgnoredDuringExecution":
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
}
case "preferredDuringSchedulingIgnoredDuringExecution":
pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
Weight: 10,
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
}
case "requiredDuringSchedulingRequiredDuringExecution":
default:
t.Fatalf("Invalid affinity type %s", affinityType)
}
case "preferredDuringSchedulingIgnoredDuringExecution":
podWithNodeAffinity.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{
{
Weight: 10,
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
}
case "requiredDuringSchedulingRequiredDuringExecution":
default:
t.Fatalf("Invalid affinity type %s", affinityType)
}

pod1 := test.BuildTestPod("pod1", 100, 0, node.Name, nil)
pod2 := test.BuildTestPod("pod2", 100, 0, node.Name, nil)
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.DeletionTimestamp = deletionTimestamp
})

podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

podWithNodeAffinity.DeletionTimestamp = deletionTimestamp
pod1.DeletionTimestamp = deletionTimestamp
pod2.DeletionTimestamp = deletionTimestamp
pod1 := buildTestPod("pod1", nodeName, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.DeletionTimestamp = deletionTimestamp
})
pod2 := buildTestPod("pod2", nodeName, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.DeletionTimestamp = deletionTimestamp
})

return []*v1.Pod{
podWithNodeAffinity,
@@ -126,8 +144,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
NodeAffinityType: []string{"requiredDuringSchedulingRequiredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingRequiredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingRequiredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
},
{
description: "Pod is correctly scheduled on node, no eviction expected [required affinity]",
@@ -135,8 +156,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels},
pods: addPodsToNode(nodeWithLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
},
{
description: "Pod is correctly scheduled on node, no eviction expected [preferred affinity]",
@@ -144,8 +167,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels},
pods: addPodsToNode(nodeWithLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
@@ -153,8 +178,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available with better fit, should be evicted",
@@ -162,8 +190,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should be evicted [required affinity]",
@@ -171,8 +202,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint1,
},
{
@@ -181,8 +215,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint1,
},
{
@@ -191,8 +228,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint0,
},
{
@@ -201,8 +241,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint0,
},
{
@@ -211,8 +254,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint1,
},
{
@@ -221,8 +267,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint1,
},
{
@@ -231,8 +280,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
@@ -241,8 +293,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
maxNoOfPodsToEvictTotal: &uint0,
},
@@ -252,8 +307,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
@@ -262,8 +320,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint0,
},
{
@@ -272,8 +333,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint0,
},
{
@@ -282,8 +346,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
@@ -292,8 +359,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
@@ -302,8 +372,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildUnschedulableNodeWithLabels(),
},
nodefit: true,
},
{
@@ -312,8 +385,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildUnschedulableNodeWithLabels(),
},
nodefit: true,
},
{
@@ -322,8 +398,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
buildUnschedulableNodeWithLabels(),
},
maxPodsToEvictPerNode: &uint1,
nodefit: true,
},
@@ -333,8 +412,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
buildUnschedulableNodeWithLabels(),
},
maxPodsToEvictPerNode: &uint1,
nodefit: true,
},
@@ -369,6 +451,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
}

plugin, err := New(
ctx,
&RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: tc.args.NodeAffinityType,
},