Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)

Compare commits: v0.31.0 ... f164943257 (542 commits)

.github/PULL_REQUEST_TEMPLATE.md (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
## Description
<!-- Please include a summary of the change and which issue is fixed -->

## Checklist
Please ensure your pull request meets the following criteria before submitting
for review; these items will be used by reviewers to assess the quality and
completeness of your changes:

- [ ] **Code Readability**: Is the code easy to understand, well-structured, and consistent with project conventions?
- [ ] **Naming Conventions**: Are variable, function, and struct names descriptive and consistent?
- [ ] **Code Duplication**: Is there any repeated code that should be refactored?
- [ ] **Function/Method Size**: Are functions/methods short and focused on a single task?
- [ ] **Comments & Documentation**: Are comments clear, useful, and not excessive? Were comments updated where necessary?
- [ ] **Error Handling**: Are errors handled appropriately?
- [ ] **Testing**: Are there sufficient unit/integration tests?
- [ ] **Performance**: Are there any obvious performance issues or unnecessary computations?
- [ ] **Dependencies**: Are new dependencies justified?
- [ ] **Logging & Monitoring**: Is logging used appropriately (not too verbose, not too silent)?
- [ ] **Backward Compatibility**: Does this change break any existing functionality or APIs?
- [ ] **Resource Management**: Are resources (files, connections, memory) managed and released properly?
- [ ] **PR Description**: Is the PR description clear, providing enough context and explaining the motivation for the change?
- [ ] **Documentation & Changelog**: Are README and docs updated if necessary?

.github/workflows/manifests.yaml (vendored, 8 lines changed)
@@ -7,20 +7,22 @@ jobs:
deploy:
strategy:
matrix:
k8s-version: ["v1.31.0"]
descheduler-version: ["v0.30.0"]
k8s-version: ["v1.34.0"]
descheduler-version: ["v0.34.0"]
descheduler-api: ["v1alpha2"]
manifest: ["deployment"]
kind-version: ["v0.30.0"] # keep in sync with test/run-e2e-tests.sh
runs-on: ubuntu-latest
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Create kind cluster
uses: helm/kind-action@v1.10.0
uses: helm/kind-action@v1.12.0
with:
node_image: kindest/node:${{ matrix.k8s-version }}
kubectl_version: ${{ matrix.k8s-version }}
config: test/kind-config.yaml
version: ${{ matrix.kind-version }}
- uses: actions/setup-go@v5
with:
go-version-file: go.mod

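For reference, the cluster this job spins up can be reproduced roughly with the kind CLI, assuming kind v0.30.0 (the version pinned in the matrix above) is installed locally; the node image tag and config file come straight from the workflow:

```
kind create cluster --image kindest/node:v1.34.0 --config test/kind-config.yaml
```
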
.github/workflows/release.yaml (vendored, 3 lines changed)
@@ -5,6 +5,9 @@ on:
branches:
- release-*

permissions:
contents: write # allow actions to update gh-pages branch

jobs:
release:
runs-on: ubuntu-latest

@@ -1,5 +1,5 @@
run:
timeout: 2m
timeout: 5m

linters:
disable-all: true

CONTRIBUTING-descheduler.md (new file, 30 lines)
@@ -0,0 +1,30 @@
# Descheduler Design Constraints

This is a slowly growing document that lists good practices, conventions, and design decisions.

## Overview

TBD

## Code convention

* *formatting code*: run `make fmt` before committing each change to avoid CI failures

## Unit Test Conventions

These are the known conventions that are useful to practice whenever reasonable:

* *single pod creation*: each pod variable built using `test.BuildTestPod` is updated only through the `apply` argument of `BuildTestPod`
* *single node creation*: each node variable built using `test.BuildTestNode` is updated only through the `apply` argument of `BuildTestNode`
* *no object instance sharing*: each object built through `test.BuildXXX` functions is newly created in each unit test to avoid accidental object mutations
* *no object instance duplication*: avoid duplication by not creating two objects with the same passed values in two different places, e.g. two nodes created with the same memory, CPU, and pod requests. Rather, create a single function wrapping `test.BuildTestNode` and invoke this wrapper multiple times.

The aim is to reduce cognitive load when reading and debugging the test code.

## Design Decisions FAQ

This section documents common questions about design decisions in the descheduler codebase and the rationale behind them.

### Why doesn't the framework provide helpers for registering and retrieving indexers for plugins?

In general, each plugin can have many indexers—for example, for nodes, namespaces, pods, and other resources. Each plugin, depending on its internal optimizations, may choose a different indexing function. Indexers are currently used very rarely in the framework and default plugins. Therefore, extending the framework interface with additional helpers for registering and retrieving indexers might introduce an unnecessary and overly restrictive layer without first understanding how indexers will be used. For the moment, I suggest avoiding any restrictions on how many indexers can be registered or which ones can be registered. Instead, we should extend the framework handle to provide a unique ID for each profile, so that indexers within the same profile share a unique prefix. This avoids collisions when the same profile is instantiated more than once. Later, once we learn more about indexer usage, we can revisit whether it makes sense to impose additional restrictions.

@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.22.5
FROM golang:1.24.6

WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
@@ -21,7 +21,7 @@ RUN VERSION=${VERSION} make build.$ARCH

FROM scratch

MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>

LABEL org.opencontainers.image.source https://github.com/kubernetes-sigs/descheduler

@@ -13,7 +13,7 @@
# limitations under the License.
FROM scratch

MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>

USER 1000

Makefile (4 lines changed)
@@ -26,7 +26,7 @@ ARCHS = amd64 arm arm64

LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"

GOLANGCI_VERSION := v1.60.3
GOLANGCI_VERSION := v1.64.8
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)

GOFUMPT_VERSION := v0.7.0
@@ -148,7 +148,7 @@ lint:
ifndef HAS_GOLANGCI
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
endif
./_output/bin/golangci-lint run
./_output/bin/golangci-lint run -v

fmt:
ifndef HAS_GOFUMPT

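With these targets, the linting and formatting steps mentioned in CONTRIBUTING-descheduler.md come down to two commands run from the repository root (the behaviour in the comments is taken from the Makefile excerpt above):

```
make lint   # installs golangci-lint v1.64.8 into _output/bin if missing, then runs it verbosely
make fmt    # formats the tree with gofumpt; run before committing to avoid CI failures
```
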
OWNERS (3 lines changed)
@@ -4,6 +4,7 @@ approvers:
- seanmalloy
- a7i
- knelasevero
- ricardomaraschini
reviewers:
- damemi
- seanmalloy
@@ -13,6 +14,8 @@ reviewers:
- janeliul
- knelasevero
- jklaw90
- googs1025
- ricardomaraschini
emeritus_approvers:
- aveshagarwal
- k82cn

README.md (222 lines changed)
@@ -33,15 +33,16 @@ but relies on the default scheduler for that.
## ⚠️ Documentation Versions by Release

If you are using a published release of Descheduler (such as
`registry.k8s.io/descheduler/descheduler:v0.31.0`), follow the documentation in
`registry.k8s.io/descheduler/descheduler:v0.34.0`), follow the documentation in
that version's release branch, as listed below:

|Descheduler Version|Docs link|
|---|---|
|v0.34.x|[`release-1.34`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.34/README.md)|
|v0.33.x|[`release-1.33`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.33/README.md)|
|v0.32.x|[`release-1.32`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.32/README.md)|
|v0.31.x|[`release-1.31`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.31/README.md)|
|v0.30.x|[`release-1.30`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.30/README.md)|
|v0.29.x|[`release-1.29`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.29/README.md)|
|v0.28.x|[`release-1.28`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.28/README.md)|

The
[`master`](https://github.com/kubernetes-sigs/descheduler/blob/master/README.md)

@@ -93,17 +94,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku

Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.31.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.34' | kubectl apply -f -
```

Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.31.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.34' | kubectl apply -f -
```

Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.31.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.34' | kubectl apply -f -
```

## User Guide

@@ -118,29 +119,100 @@ The Descheduler Policy is configurable and includes default strategy plugins tha

These are top level keys in the Descheduler Policy that you can use to configure all evictions.

| Name |type| Default Value | Description |
|------|----|---------------|-------------|
| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point |
| `maxNoOfPodsToEvictPerNode` |`int`| `nil` | maximum number of pods evicted from each node (summed through all strategies) |
| `maxNoOfPodsToEvictPerNamespace` |`int`| `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
| `maxNoOfPodsToEvictTotal` |`int`| `nil` | maximum number of pods evicted per rescheduling cycle (summed through all strategies) |
| Name | Type | Default Value | Description |
|------------------------------------|------------|---------------|-------------|
| `nodeSelector` | `string` | `nil` | Limits the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point. |
| `maxNoOfPodsToEvictPerNode` | `int` | `nil` | Maximum number of pods evicted from each node (summed through all strategies). |
| `maxNoOfPodsToEvictPerNamespace` | `int` | `nil` | Maximum number of pods evicted from each namespace (summed through all strategies). |
| `maxNoOfPodsToEvictTotal` | `int` | `nil` | Maximum number of pods evicted per rescheduling cycle (summed through all strategies). |
| `metricsCollector` (deprecated) | `object` | `nil` | Configures collection of metrics for actual resource utilization. |
| `metricsCollector.enabled` | `bool` | `false` | Enables Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) collection. |
| `metricsProviders` | `[]object` | `nil` | Enables various metrics providers like the Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/). |
| `evictionFailureEventNotification` | `bool` | `false` | Enables eviction failure event notification. |
| `gracePeriodSeconds` | `int` | `nil` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. |
| `prometheus` | `object` | `nil` | Configures collection of Prometheus metrics for actual resource utilization. |
| `prometheus.url` | `string` | `nil` | Points to a Prometheus server URL. |
| `prometheus.authToken` | `object` | `nil` | Sets the Prometheus server authentication token. If not specified, the in-cluster authentication token from the container's file system is read. |
| `prometheus.authToken.secretReference` | `object` | `nil` | Read the authentication token from a Kubernetes secret (the secret is expected to contain the token under the `prometheusAuthToken` data key). |
| `prometheus.authToken.secretReference.namespace` | `string` | `nil` | Authentication token Kubernetes secret namespace (currently, the RBAC configuration permits retrieving secrets from the `kube-system` namespace; if the secret needs to be accessed from a different namespace, the existing RBAC rules must be explicitly extended). |
| `prometheus.authToken.secretReference.name` | `string` | `nil` | Authentication token Kubernetes secret name. |

The descheduler currently allows configuring metrics collection through the `metricsProviders` field.
The previous way of setting the `metricsCollector` field is deprecated. There are currently two sources to configure:
- `KubernetesMetrics`: enables metrics collection from the Kubernetes Metrics server
- `Prometheus`: enables metrics collection from a Prometheus server

In general, each plugin can consume metrics from a different provider so multiple distinct providers can be configured in parallel.

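To make that concrete, here is a minimal sketch of the non-deprecated form with two providers configured in parallel; the Prometheus URL is the same illustrative value used in the example policy further below, and the `profiles` section is omitted:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
# replaces the deprecated metricsCollector: {enabled: true} form
metricsProviders:
  - source: KubernetesMetrics
  - source: Prometheus
    prometheus:
      url: http://prometheus-kube-prometheus-prometheus.prom.svc.cluster.local
```
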
### Evictor Plugin configuration (Default Evictor)

The Default Evictor Plugin is used by default for filtering pods before processing them in a strategy plugin, or for applying a PreEvictionFilter of pods before eviction. You can also create your own Evictor Plugin or use the Default one provided by Descheduler. Other uses for the Evictor plugin can be to sort, filter, validate or group pods by different criteria, and that's why this is handled by a plugin and not configured in the top level config.

| Name |type| Default Value | Description |
|------|----|---------------|-------------|
| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed |
| `evictLocalStoragePods` |`bool`| `false` | allows eviction of pods with local storage |
| `evictSystemCriticalPods` |`bool`| `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` |`bool`| `false` | set whether PVC pods should be evicted or ignored |
| `evictFailedBarePods` |`bool`| `false` | allow eviction of pods without owner references and in failed phase |
|`labelSelector`|`metav1.LabelSelector`||(see [label filtering](#label-filtering))|
|`priorityThreshold`|`priorityThreshold`||(see [priority filtering](#priority-filtering))|
|`nodeFit`|`bool`|`false`|(see [node fit filtering](#node-fit-filtering))|
|`minReplicas`|`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
|`minPodAge`|`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold |
| Name | Type | Default Value | Description |
|---------------------------|------------------------|---------------|-------------|
| `nodeSelector` | `string` | `nil` | Limits the nodes that are processed. |
| `evictLocalStoragePods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithLocalStorage"` instead]**<br>Allows eviction of pods using local storage. |
| `evictDaemonSetPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"DaemonSetPods"` instead]**<br>Allows eviction of DaemonSet managed Pods. |
| `evictSystemCriticalPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"SystemCriticalPods"` instead]**<br>[Warning: Will evict Kubernetes system pods] Allows eviction of pods with any priority, including system-critical pods like kube-dns. |
| `ignorePvcPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithPVC"` instead]**<br>Sets whether PVC pods should be evicted or ignored. |
| `evictFailedBarePods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"FailedBarePods"` instead]**<br>Allows eviction of pods without owner references and in a failed phase. |
| `ignorePodsWithoutPDB` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithoutPDB"` instead]**<br>Sets whether pods without a PodDisruptionBudget should be evicted or ignored. |
| `labelSelector` | `metav1.LabelSelector` | | (See [label filtering](#label-filtering)) |
| `priorityThreshold` | `priorityThreshold` | | (See [priority filtering](#priority-filtering)) |
| `nodeFit` | `bool` | `false` | (See [node fit filtering](#node-fit-filtering)) |
| `minReplicas` | `uint` | `0` | Ignores eviction of pods where the owner (e.g., `ReplicaSet`) replicas are below this threshold. |
| `minPodAge` | `metav1.Duration` | `0` | Ignores eviction of pods with a creation time within this threshold. |
| `noEvictionPolicy` | `enum` | `` | Sets whether a `descheduler.alpha.kubernetes.io/prefer-no-eviction` pod annotation is considered preferred or mandatory. Accepted values: "", "Preferred", "Mandatory". Defaults to "Preferred". |
| `podProtections` | `PodProtections` | `{}` | Holds the list of enabled and disabled protection pod policies.<br>Users can selectively disable certain default protection rules or enable extra ones. See below for supported values. |

#### Supported Values for `podProtections.DefaultDisabled`

> Setting a value in `defaultDisabled` **disables the corresponding default protection rule**. This means the specified type of Pods will **no longer be protected** from eviction and may be evicted if they meet other criteria.

| Value | Meaning |
|--------------------------|-------------------------------------------------------------------------|
| `"PodsWithLocalStorage"` | Allow eviction of Pods using local storage. |
| `"DaemonSetPods"` | Allow eviction of DaemonSet-managed Pods. |
| `"SystemCriticalPods"` | Allow eviction of system-critical Pods. |
| `"FailedBarePods"` | Allow eviction of failed bare Pods (without controllers). |

---

#### Supported Values for `podProtections.ExtraEnabled`

> Setting a value in `extraEnabled` **enables an additional protection rule**. This means the specified type of Pods will be **protected** from eviction.

| Value | Meaning |
|----------------------------|------------------------------------------------------------------|
| `"PodsWithPVC"` | Prevents eviction of Pods using Persistent Volume Claims (PVCs). |
| `"PodsWithoutPDB"` | Prevents eviction of Pods without a PodDisruptionBudget (PDB). |
| `"PodsWithResourceClaims"` | Prevents eviction of Pods using ResourceClaims. |

#### Protecting pods using specific Storage Classes

With the `PodsWithPVC` protection enabled, all pods using PVCs are protected from eviction by default. If needed, you can restrict the protection by filtering on the PVC storage class. When filtering by storage class, only pods using PVCs with the specified storage classes are protected from eviction. For example:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "DefaultEvictor"
        args:
          podProtections:
            extraEnabled:
              - PodsWithPVC
            config:
              PodsWithPVC:
                protectedStorageClasses:
                  - name: storage-class-0
                  - name: storage-class-1
```

This example protects pods using PVCs with storage classes `storage-class-0` and `storage-class-1` from eviction.

### Example policy

@@ -157,14 +229,32 @@ nodeSelector: "node=node1" # you don't need to set this, if not set all will be
maxNoOfPodsToEvictPerNode: 5000 # you don't need to set this, unlimited if not set
maxNoOfPodsToEvictPerNamespace: 5000 # you don't need to set this, unlimited if not set
maxNoOfPodsToEvictTotal: 5000 # you don't need to set this, unlimited if not set
gracePeriodSeconds: 60 # you don't need to set this, 0 if not set
# you don't need to set this, metrics are not collected if not set
metricsProviders:
  - source: Prometheus
    prometheus:
      url: http://prometheus-kube-prometheus-prometheus.prom.svc.cluster.local
      authToken:
        secretReference:
          namespace: "kube-system"
          name: "authtoken"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "DefaultEvictor"
        args:
          evictSystemCriticalPods: true
          evictFailedBarePods: true
          evictLocalStoragePods: true
          podProtections:
            defaultDisabled:
            #- "PodsWithLocalStorage"
            #- "SystemCriticalPods"
            #- "DaemonSetPods"
            #- "FailedBarePods"
            extraEnabled:
            #- "PodsWithPVC"
            #- "PodsWithoutPDB"
            #- "PodsWithResourceClaims"
            config: {}
          nodeFit: true
          minReplicas: 2
    plugins:

@@ -276,11 +366,18 @@ If that parameter is set to `true`, the thresholds are considered as percentage
`thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).

**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
**NOTE:** By default node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
actual usage metrics. Metrics-based descheduling can be enabled by setting the `metricsUtilization.metricsServer` field (deprecated)
or the `metricsUtilization.source` field to `KubernetesMetrics`.
In order to have the plugin consume the metrics, the metrics provider needs to be configured as well.
Alternatively, it is possible to create a Prometheus client and configure a Prometheus query to consume
metrics outside of the Kubernetes metrics server. The query is expected to return a vector of values for
each node. The values are expected to be any real number within the <0; 1> interval. During eviction, at most
a single pod is evicted from each overutilized node. There's currently no support for evicting more.
See the `metricsProviders` field at [Top Level configuration](#top-level-configuration) for available options.

**Parameters:**

@@ -290,7 +387,13 @@ actual usage metrics. Implementing metrics-based descheduling is currently TODO
|`thresholds`|map(string:int)|
|`targetThresholds`|map(string:int)|
|`numberOfNodes`|int|
|`evictionLimits`|object|
|`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|
|`metricsUtilization`|object|
|`metricsUtilization.metricsServer` (deprecated)|bool|
|`metricsUtilization.source`|string|
|`metricsUtilization.prometheus.query`|string|

**Example:**

@@ -310,6 +413,12 @@ profiles:
"cpu" : 50
"memory": 50
"pods": 50
# metricsUtilization:
# source: Prometheus
# prometheus:
# query: instance:node_cpu:rate:sum
evictionLimits:
node: 5
plugins:
balance:
enabled:

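The commented `metricsUtilization` lines in the hunk above only take effect when a matching metrics provider is configured at the top level of the policy. A minimal sketch of that pairing using the Kubernetes Metrics Server source; the field names come from the tables above and the threshold numbers are purely illustrative:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
metricsProviders:
  - source: KubernetesMetrics      # top-level provider the plugin consumes
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "LowNodeUtilization"
        args:
          thresholds:
            "cpu": 20
            "memory": 20
          targetThresholds:
            "cpu": 50
            "memory": 50
          metricsUtilization:
            source: KubernetesMetrics   # switch node usage from requests to actual usage
    plugins:
      balance:
        enabled:
          - "LowNodeUtilization"
```
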
@@ -325,10 +434,12 @@ and will not be used to compute node's usage if it's not specified in `threshold
* The valid range of the resource's percentage value is \[0, 100\]
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.

There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
There are two more parameters associated with the `LowNodeUtilization` strategy, called `numberOfNodes` and `evictionLimits`.
The first parameter can be configured to activate the strategy only when the number of underutilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
underutilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
The second parameter is useful when the number of evictions per plugin per descheduling cycle needs to be limited.
The parameter currently makes it possible to limit the number of evictions per node through the `node` field.

### HighNodeUtilization

@@ -354,6 +465,12 @@ strategy evicts pods from `underutilized nodes` (those with usage below `thresho
so that they can be recreated in appropriately utilized nodes.
The strategy will abort if the number of `underutilized nodes` or `appropriately utilized nodes` is zero.

To control pod eviction from underutilized nodes, use the `evictionModes`
array. A lenient policy, which evicts pods regardless of their resource
requests, is the default. To enable a stricter policy that only evicts pods
with resource requests defined for the provided threshold resources, add the
option `OnlyThresholdingResources` to the `evictionModes` configuration.

**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
@@ -366,8 +483,15 @@ actual usage metrics. Implementing metrics-based descheduling is currently TODO
|---|---|
|`thresholds`|map(string:int)|
|`numberOfNodes`|int|
|`evictionModes`|list(string)|
|`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|

**Supported Eviction Modes:**

|Name|Description|
|---|---|
|`OnlyThresholdingResources`|Evict only pods that have resource requests defined for the provided threshold resources.|

**Example:**

@@ -386,6 +510,8 @@ profiles:
exclude:
- "kube-system"
- "namespace1"
evictionModes:
- "OnlyThresholdingResources"
plugins:
balance:
enabled:

@@ -660,7 +786,9 @@ profiles:
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.

You can also specify `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Unknown`
> The primary purpose for using states like `Succeeded` and `Failed` is releasing resources so that new pods can be rescheduled.
> I.e., the main motivation is not for cleaning pods, rather to release resources.
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Succeeded`, `Failed`, `Unknown`
- [Pod Reason](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions) reasons of: `NodeAffinity`, `NodeLost`, `Shutdown`, `UnexpectedAdmissionError`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`, `ImagePullBackOff`, `CrashLoopBackOff`, `CreateContainerConfigError`, `ErrImagePull`, `CreateContainerError`, `InvalidImageName`

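A minimal policy sketch for the hunk above, using the documented `maxPodLifeTimeSeconds` and `states` parameters; the profile name and the one-day lifetime are illustrative, and the plugin is assumed to be enabled under the `deschedule` extension point as in the upstream examples:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "PodLifeTime"
        args:
          maxPodLifeTimeSeconds: 86400   # evict pods older than one day
          states:
            - "Pending"
            - "PodInitializing"
    plugins:
      deschedule:
        enabled:
          - "PodLifeTime"
```
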
@@ -858,7 +986,7 @@ does not exist, descheduler won't create it and will throw an error.

### Label filtering

The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#labelselector-v1-meta)
to filter pods by their labels:

* `PodLifeTime`

@@ -944,12 +1072,16 @@ never evicted because these pods won't be recreated. (Standalone pods in failed
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have the same priority,
best effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
* All types of pods with the `descheduler.alpha.kubernetes.io/evict` annotation are eligible for eviction. This
annotation is used to override checks which prevent eviction, and users can select which pod is evicted.
Users should know how and if the pod will be recreated.
The annotation only affects internal descheduler checks.
The anti-disruption protection provided by the [/eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/)
subresource is still respected.
* Pods with the `descheduler.alpha.kubernetes.io/prefer-no-eviction` annotation voice their preference not to be evicted.
Each plugin decides whether the annotation gets respected or not. When the `DefaultEvictor` plugin sets `noEvictionPolicy`
to `Mandatory`, all such pods are excluded from eviction. This needs to be used with caution, as some plugins may enforce
policies that are expected to always be met.
* Pods with a non-nil DeletionTimestamp are not evicted by default.

Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.

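To illustrate the annotations from the list above, here is a metadata sketch for a pod that opts in to eviction; the pod name, image, and annotation value are placeholders, and the opposite preference is noted in a comment:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-pod                # placeholder name
  annotations:
    # Opt this pod in for eviction, overriding internal descheduler checks.
    # The opposite preference uses: descheduler.alpha.kubernetes.io/prefer-no-eviction: ""
    descheduler.alpha.kubernetes.io/evict: "true"
spec:
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9   # placeholder image
```

To make the no-eviction preference binding cluster-wide, set `noEvictionPolicy: "Mandatory"` in the `DefaultEvictor` args, as listed in the Default Evictor table above.
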
@@ -978,10 +1110,15 @@ To get best results from HA mode some additional configurations might require:

## Metrics

| name | type | description |
|-------|-------|----------------|
| build_info | gauge | constant 1 |
| pods_evicted | CounterVec | total number of pods evicted |
| name | type | description |
|---------------------------------------|--------------|-------------|
| build_info | gauge | constant 1 |
| pods_evicted | CounterVec | total number of pods evicted, deprecated in version v0.34.0 |
| pods_evicted_total | CounterVec | total number of pods evicted |
| descheduler_loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (supports _bucket, _sum, _count), deprecated in version v0.34.0 |
| loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (supports _bucket, _sum, _count) |
| descheduler_strategy_duration_seconds | HistogramVec | time taken to complete each strategy of the descheduling operation (supports _bucket, _sum, _count), deprecated in version v0.34.0 |
| strategy_duration_seconds | HistogramVec | time taken to complete each strategy of the descheduling operation (supports _bucket, _sum, _count) |

The metrics are served through https://localhost:10258/metrics by default.
The address and port can be changed by setting the `--binding-address` and `--secure-port` flags.

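For a quick look at these metrics, one option is to port-forward to the descheduler and query the endpoint. The namespace and Deployment name below assume the upstream `kubernetes/deployment` manifests; depending on how authentication on the secure port is configured, a bearer token may also be required:

```
kubectl -n kube-system port-forward deployment/descheduler 10258:10258
curl -k https://localhost:10258/metrics | grep -E 'pods_evicted|strategy_duration_seconds'
```
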
@@ -997,6 +1134,13 @@ packages that it is compiled with.

| Descheduler | Supported Kubernetes Version |
|-------------|------------------------------|
| v0.34 | v1.34 |
| v0.33 | v1.33 |
| v0.32 | v1.32 |
| v0.31 | v1.31 |
| v0.30 | v1.30 |
| v0.29 | v1.29 |
| v0.28 | v1.28 |
| v0.27 | v1.27 |
| v0.26 | v1.26 |
| v0.25 | v1.25 |

@@ -1034,7 +1178,7 @@ that the only people who can get things done around here are the "maintainers".
We also would love to add more "official" maintainers, so show us what you can
do!

This repository uses the Kubernetes bots. See a full list of the commands [here][prow].
This repository uses the Kubernetes bots. See a full list of the commands [here](https://go.k8s.io/bot-commands).

### Communicating With Contributors

@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.30.1
appVersion: 0.30.1
version: 0.34.0
appVersion: 0.34.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes
@@ -13,4 +13,4 @@ sources:
- https://github.com/kubernetes-sigs/descheduler
maintainers:
- name: Kubernetes SIG Scheduling
email: kubernetes-sig-scheduling@googlegroups.com
email: sig-scheduling@kubernetes.io

@@ -11,7 +11,7 @@ helm install my-release --namespace kube-system descheduler/descheduler

## Introduction

This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job with a default DeschedulerPolicy on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. To preview what changes descheduler would make without actually going forward with the changes, you can install descheduler in dry run mode by providing the flag `--set cmdOptions.dry-run=true` to the `helm install` command below.

## Prerequisites

@@ -64,13 +64,16 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `cronJobAnnotations` | Annotations to add to the descheduler CronJob | `{}` |
| `cronJobLabels` | Labels to add to the descheduler CronJob | `{}` |
| `jobAnnotations` | Annotations to add to the descheduler Job resources (created by CronJob) | `{}` |
| `jobLabels` | Labels to add to the descheduler Job resources (created by CronJob) | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |

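For reference, the dry-run installation mentioned in the introduction above looks like this (release name and namespace follow the chart README's own install example):

```
helm install my-release --namespace kube-system descheduler/descheduler --set cmdOptions.dry-run=true
```
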
@@ -10,3 +10,13 @@ WARNING: You enabled DryRun mode, you can't use Leader Election.
{{- end}}
{{- end}}
{{- end}}
{{- if .Values.deschedulerPolicy }}
A DeschedulerPolicy has been applied for you. You can view the policy with:

kubectl get configmap -n {{ include "descheduler.namespace" . }} {{ template "descheduler.fullname" . }} -o yaml

If you wish to define your own policies out of band from this chart, you may define a configmap named {{ template "descheduler.fullname" . }}.
To avoid a conflict between helm and your out of band method to deploy the configmap, please set deschedulerPolicy in values.yaml to an empty object as below.

deschedulerPolicy: {}
{{- end }}

@@ -24,6 +24,9 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
  resources: ["priorityclasses"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
  resources: ["poddisruptionbudgets"]
  verbs: ["get", "watch", "list"]
{{- if .Values.leaderElection.enabled }}
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
@@ -33,4 +36,13 @@ rules:
  resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
  verbs: ["get", "patch", "delete"]
{{- end }}
{{- if and .Values.deschedulerPolicy }}
{{- range .Values.deschedulerPolicy.metricsProviders }}
{{- if and (hasKey . "source") (eq .source "KubernetesMetrics") }}
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods", "nodes"]
  verbs: ["get", "list"]
{{- end }}
{{- end }}
{{- end }}
{{- end -}}

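The new `metrics.k8s.io` rule above is only rendered when the chart's policy values declare a `KubernetesMetrics` provider, e.g. with a values.yaml fragment like this (a sketch; the rest of `deschedulerPolicy` is omitted):

```yaml
deschedulerPolicy:
  metricsProviders:
    - source: KubernetesMetrics
```
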
@@ -10,5 +10,5 @@ data:
  policy.yaml: |
    apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
    kind: "DeschedulerPolicy"
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
{{ tpl (toYaml .Values.deschedulerPolicy) . | trim | indent 4 }}
{{- end }}

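Wrapping the policy in `tpl` means template expressions inside `deschedulerPolicy` values are now rendered against the chart context instead of being emitted literally. A hypothetical values.yaml fragment to illustrate the idea; both `zone` and the selector value are made-up examples, not chart defaults:

```yaml
zone: eu-west-1   # hypothetical helper value
deschedulerPolicy:
  # rendered by tpl to: nodeSelector: "topology.kubernetes.io/zone=eu-west-1"
  nodeSelector: "topology.kubernetes.io/zone={{ .Values.zone }}"
```
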
@@ -4,8 +4,15 @@ kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
{{- if .Values.cronJobAnnotations }}
annotations:
{{- .Values.cronJobAnnotations | toYaml | nindent 4 }}
{{- end }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.cronJobLabels }}
{{- .Values.cronJobLabels | toYaml | nindent 4 }}
{{- end }}
spec:
schedule: {{ .Values.schedule | quote }}
{{- if .Values.suspend }}
@@ -15,20 +22,34 @@ spec:
{{- if .Values.startingDeadlineSeconds }}
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
{{- end }}
{{- if .Values.successfulJobsHistoryLimit }}
{{- if ne .Values.successfulJobsHistoryLimit nil }}
successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
{{- end }}
{{- if .Values.failedJobsHistoryLimit }}
{{- if ne .Values.failedJobsHistoryLimit nil }}
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
{{- end }}
{{- if .Values.timeZone }}
timeZone: {{ .Values.timeZone }}
{{- end }}
jobTemplate:
{{- if or .Values.jobAnnotations .Values.jobLabels }}
metadata:
{{- if .Values.jobAnnotations }}
annotations:
{{- .Values.jobAnnotations | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.jobLabels }}
labels:
{{- .Values.jobLabels | toYaml | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- if .Values.ttlSecondsAfterFinished }}
ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
{{- end }}
{{- if .Values.activeDeadlineSeconds }}
activeDeadlineSeconds: {{ .Values.activeDeadlineSeconds }}
{{- end }}
template:
metadata:
name: {{ template "descheduler.fullname" . }}
@@ -67,6 +88,9 @@ spec:
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}
restartPolicy: "Never"
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
@@ -89,6 +113,8 @@ spec:
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}
ports:
{{- toYaml .Values.ports | nindent 16 }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
{{- if .Values.securityContext }}
@@ -98,6 +124,9 @@ spec:
volumeMounts:
- mountPath: /policy-dir
  name: policy-volume
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 16 }}
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 12 }}
@@ -106,4 +135,7 @@ spec:
- name: policy-volume
  configMap:
    name: {{ template "descheduler.fullname" . }}
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumes | nindent 10 }}
{{- end }}
{{- end }}

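One visible effect of switching the two history-limit conditions above from a plain `if` to `if ne ... nil` is that an explicit `0` (which is falsy for `if` in Go templates) is now rendered instead of being silently dropped. A plausible reading of the change is that values like these now take effect; the keys are the chart's own:

```yaml
successfulJobsHistoryLimit: 0   # keep no successful Job history
failedJobsHistoryLimit: 0       # keep no failed Job history
```
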
@@ -6,6 +6,9 @@ metadata:
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.annotations }}
annotations: {{- toYaml .Values.deploymentAnnotations | nindent 4 }}
{{- end }}
spec:
{{- if gt (.Values.replicas | int) 1 }}
{{- if not .Values.leaderElection.enabled }}
@@ -39,6 +42,9 @@ spec:
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 6 }}
@@ -59,10 +65,11 @@ spec:
- {{ printf "--%s" $key }}
{{- end }}
{{- end }}
{{- if .Values.leaderElection.enabled }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
{{- end }}
ports:
- containerPort: 10258
  protocol: TCP
{{- toYaml .Values.ports | nindent 12 }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
resources:
@@ -74,6 +81,9 @@ spec:
volumeMounts:
- mountPath: /policy-dir
  name: policy-volume
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
@@ -82,6 +92,9 @@ spec:
- name: policy-volume
  configMap:
    name: {{ template "descheduler.fullname" . }}
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumes | nindent 8}}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}

@@ -1,6 +1,9 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
{{- if kindIs "bool" .Values.serviceAccount.automountServiceAccountToken }}
|
||||
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
metadata:
|
||||
name: {{ template "descheduler.serviceAccountName" . }}
|
||||
namespace: {{ include "descheduler.namespace" . }}
|
||||
|
||||
109
charts/descheduler/tests/cronjob_annotations_test.yaml
Normal file
@@ -0,0 +1,109 @@
|
||||
suite: Test Descheduler CronJob and Job Annotations and Labels
|
||||
|
||||
templates:
|
||||
- "*.yaml"
|
||||
|
||||
release:
|
||||
name: descheduler
|
||||
|
||||
set:
|
||||
kind: CronJob
|
||||
|
||||
tests:
|
||||
- it: adds cronJob and job annotations and labels when set
|
||||
template: templates/cronjob.yaml
|
||||
set:
|
||||
cronJobAnnotations:
|
||||
monitoring.company.com/scrape: "true"
|
||||
description: "test cronjob"
|
||||
cronJobLabels:
|
||||
environment: "test"
|
||||
team: "platform"
|
||||
jobAnnotations:
|
||||
sidecar.istio.io/inject: "false"
|
||||
job.company.com/retry-limit: "3"
|
||||
jobLabels:
|
||||
job-type: "maintenance"
|
||||
priority: "high"
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.annotations["monitoring.company.com/scrape"]
|
||||
value: "true"
|
||||
- equal:
|
||||
path: metadata.annotations.description
|
||||
value: "test cronjob"
|
||||
- equal:
|
||||
path: metadata.labels.environment
|
||||
value: "test"
|
||||
- equal:
|
||||
path: metadata.labels.team
|
||||
value: "platform"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.annotations["sidecar.istio.io/inject"]
|
||||
value: "false"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.annotations["job.company.com/retry-limit"]
|
||||
value: "3"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.labels.job-type
|
||||
value: "maintenance"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.labels.priority
|
||||
value: "high"
|
||||
|
||||
- it: does not add cronJob and job metadata when not set
|
||||
template: templates/cronjob.yaml
|
||||
asserts:
|
||||
- isNull:
|
||||
path: metadata.annotations
|
||||
- isNotNull:
|
||||
path: metadata.labels
|
||||
- equal:
|
||||
path: metadata.labels["app.kubernetes.io/name"]
|
||||
value: descheduler
|
||||
- isNull:
|
||||
path: spec.jobTemplate.metadata
|
||||
|
||||
- it: does not add job metadata when job annotations and labels are empty
|
||||
template: templates/cronjob.yaml
|
||||
set:
|
||||
jobAnnotations: {}
|
||||
jobLabels: {}
|
||||
asserts:
|
||||
- isNull:
|
||||
path: spec.jobTemplate.metadata
|
||||
|
||||
- it: works with all annotation and label types together
|
||||
template: templates/cronjob.yaml
|
||||
set:
|
||||
cronJobAnnotations:
|
||||
cron-annotation: "cron-value"
|
||||
cronJobLabels:
|
||||
cron-label: "cron-value"
|
||||
jobAnnotations:
|
||||
job-annotation: "job-value"
|
||||
jobLabels:
|
||||
job-label: "job-value"
|
||||
podAnnotations:
|
||||
pod-annotation: "pod-value"
|
||||
podLabels:
|
||||
pod-label: "pod-value"
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.annotations.cron-annotation
|
||||
value: "cron-value"
|
||||
- equal:
|
||||
path: metadata.labels.cron-label
|
||||
value: "cron-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.annotations.job-annotation
|
||||
value: "job-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.labels.job-label
|
||||
value: "job-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.spec.template.metadata.annotations.pod-annotation
|
||||
value: "pod-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.spec.template.metadata.labels.pod-label
|
||||
value: "pod-value"
|
||||
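For reference, here is a minimal illustrative values override that exercises the new metadata keys checked by the test suite above. The key names (cronJobAnnotations, cronJobLabels, jobAnnotations, jobLabels) are taken from this change set; the sample values simply mirror the ones used in the tests and carry no special meaning.

```yaml
# Illustrative values override; only meaningful when the chart renders a CronJob.
kind: CronJob

# Applied to the CronJob object's metadata.
cronJobAnnotations:
  monitoring.company.com/scrape: "true"
cronJobLabels:
  environment: "test"

# Applied to the Job template the CronJob creates (spec.jobTemplate.metadata).
jobAnnotations:
  sidecar.istio.io/inject: "false"
jobLabels:
  job-type: "maintenance"
```

The rendered CronJob should then carry the first two maps at metadata and the last two at spec.jobTemplate.metadata, matching the assertions in the test file.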
@@ -18,9 +18,13 @@ resources:
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 256Mi
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 256Mi
|
||||
|
||||
ports:
|
||||
- containerPort: 10258
|
||||
protocol: TCP
|
||||
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -51,7 +55,8 @@ suspend: false
|
||||
# startingDeadlineSeconds: 200
|
||||
# successfulJobsHistoryLimit: 3
|
||||
# failedJobsHistoryLimit: 1
|
||||
# ttlSecondsAfterFinished 600
|
||||
# ttlSecondsAfterFinished: 600
|
||||
# activeDeadlineSeconds: 60 # Make sure this value is SHORTER than the cron interval.
|
||||
# timeZone: Etc/UTC
|
||||
|
||||
# Required when running as a Deployment
|
||||
@@ -85,16 +90,12 @@ cmdOptions:
|
||||
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
|
||||
|
||||
# deschedulerPolicy contains the policies the descheduler will execute.
|
||||
# To use policies stored in an existing configMap use:
|
||||
# NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
|
||||
# deschedulerPolicy: {}
|
||||
deschedulerPolicy:
|
||||
# nodeSelector: "key1=value1,key2=value2"
|
||||
# maxNoOfPodsToEvictPerNode: 10
|
||||
# maxNoOfPodsToEvictPerNamespace: 10
|
||||
# ignorePvcPods: true
|
||||
# evictLocalStoragePods: true
|
||||
# evictDaemonSetPods: true
|
||||
# metricsProviders:
|
||||
# - source: KubernetesMetrics
|
||||
# tracing:
|
||||
# collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
|
||||
# transportCert: ""
|
||||
@@ -107,8 +108,11 @@ deschedulerPolicy:
|
||||
pluginConfig:
|
||||
- name: DefaultEvictor
|
||||
args:
|
||||
ignorePvcPods: true
|
||||
evictLocalStoragePods: true
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- name: RemoveDuplicates
|
||||
- name: RemovePodsHavingTooManyRestarts
|
||||
args:
|
||||
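The removed DefaultEvictor booleans and the new podProtections block express the same intent in different shapes. A hedged reading of the mapping, inferred only from the field names visible in this diff, is sketched below.

```yaml
# Hedged sketch of the DefaultEvictor args before and after this change.
# The correspondence is inferred from the names in the diff above, not verified.

# Before:
# args:
#   ignorePvcPods: true          # pods with PVCs were protected from eviction
#   evictLocalStoragePods: true  # pods with local storage were evictable

# After:
args:
  podProtections:
    defaultDisabled:
      - "PodsWithLocalStorage"   # drop the default protection, local-storage pods stay evictable
    extraEnabled:
      - "PodsWithPVC"            # add protection for PVC pods, the analogue of ignorePvcPods: true
```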
@@ -193,6 +197,25 @@ serviceAccount:
|
||||
name:
|
||||
# Specifies custom annotations for the serviceAccount
|
||||
annotations: {}
|
||||
# Opt out of API credential automounting
|
||||
#
|
||||
# automountServiceAccountToken Default is not set
|
||||
# automountServiceAccountToken: true
|
||||
|
||||
# Mount the ServiceAccountToken in the Pod of a CronJob or Deployment
|
||||
# Default is not set - but only implied by the ServiceAccount
|
||||
# automountServiceAccountToken: true
|
||||
|
||||
# Annotations that'll be applied to deployment
|
||||
deploymentAnnotations: {}
|
||||
|
||||
cronJobAnnotations: {}
|
||||
|
||||
cronJobLabels: {}
|
||||
|
||||
jobAnnotations: {}
|
||||
|
||||
jobLabels: {}
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
@@ -206,8 +229,9 @@ livenessProbe:
|
||||
path: /healthz
|
||||
port: 10258
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: 10
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 20
|
||||
timeoutSeconds: 5
|
||||
|
||||
service:
|
||||
enabled: false
|
||||
@@ -244,3 +268,30 @@ serviceMonitor:
|
||||
# targetLabel: nodename
|
||||
# replacement: $1
|
||||
# action: replace
|
||||
|
||||
## Additional Volume mounts when automountServiceAccountToken is false
|
||||
# extraServiceAccountVolumeMounts:
|
||||
# - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
|
||||
# name: kube-api-access
|
||||
# readOnly: true
|
||||
|
||||
## Additional Volumes when automountServiceAccountToken is false
|
||||
# extraServiceAccountVolumes:
|
||||
# - name: kube-api-access
|
||||
# projected:
|
||||
# defaultMode: 0444
|
||||
# sources:
|
||||
# - configMap:
|
||||
# items:
|
||||
# - key: ca.crt
|
||||
# path: ca.crt
|
||||
# name: kube-root-ca.crt
|
||||
# - downwardAPI:
|
||||
# items:
|
||||
# - fieldRef:
|
||||
# apiVersion: v1
|
||||
# fieldPath: metadata.namespace
|
||||
# path: namespace
|
||||
# - serviceAccountToken:
|
||||
# expirationSeconds: 3600
|
||||
# path: token
|
||||
|
||||
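Putting the pieces of this values.yaml change together, a hedged example of opting out of token automounting while still giving the descheduler pod API credentials could look like the following. It only combines the commented samples above; it is a sketch, not a tested configuration.

```yaml
serviceAccount:
  create: true
  # Do not automount the token on the ServiceAccount object itself.
  automountServiceAccountToken: false

# Do not automount the token in the CronJob/Deployment pod either; this also
# lets the extraServiceAccountVolume* values below take effect in the templates.
automountServiceAccountToken: false

# With automounting off, provide the token and CA manually.
extraServiceAccountVolumeMounts:
  - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
    name: kube-api-access
    readOnly: true

extraServiceAccountVolumes:
  - name: kube-api-access
    projected:
      defaultMode: 0444
      sources:
        - configMap:
            items:
              - key: ca.crt
                path: ca.crt
            name: kube-root-ca.crt
        - downwardAPI:
            items:
              - fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
                path: namespace
        - serviceAccountToken:
            expirationSeconds: 3600
            path: token
```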
@@ -18,17 +18,29 @@ limitations under the License.
|
||||
package options
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apiserver "k8s.io/apiserver/pkg/server"
|
||||
apiserveroptions "k8s.io/apiserver/pkg/server/options"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
restclient "k8s.io/client-go/rest"
|
||||
cliflag "k8s.io/component-base/cli/flag"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
componentbaseoptions "k8s.io/component-base/config/options"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
|
||||
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
|
||||
"sigs.k8s.io/descheduler/pkg/features"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
)
|
||||
|
||||
@@ -40,11 +52,18 @@ const (
|
||||
type DeschedulerServer struct {
|
||||
componentconfig.DeschedulerConfiguration
|
||||
|
||||
Client clientset.Interface
|
||||
EventClient clientset.Interface
|
||||
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
|
||||
DisableMetrics bool
|
||||
EnableHTTP2 bool
|
||||
Client clientset.Interface
|
||||
EventClient clientset.Interface
|
||||
MetricsClient metricsclient.Interface
|
||||
PrometheusClient promapi.Client
|
||||
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
|
||||
SecureServingInfo *apiserver.SecureServingInfo
|
||||
DisableMetrics bool
|
||||
EnableHTTP2 bool
|
||||
// FeatureGates enabled by the user
|
||||
FeatureGates map[string]bool
|
||||
// DefaultFeatureGates for internal accessing so unit tests can enable/disable specific features
|
||||
DefaultFeatureGates featuregate.FeatureGate
|
||||
}
|
||||
|
||||
// NewDeschedulerServer creates a new DeschedulerServer with default parameters
|
||||
@@ -102,8 +121,33 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
|
||||
fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
|
||||
fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
|
||||
fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
|
||||
fs.Var(cliflag.NewMapStringBool(&rs.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
|
||||
"Options are:\n"+strings.Join(features.DefaultMutableFeatureGate.KnownFeatures(), "\n"))
|
||||
|
||||
componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
|
||||
|
||||
rs.SecureServing.AddFlags(fs)
|
||||
}
|
||||
|
||||
func (rs *DeschedulerServer) Apply() error {
|
||||
err := features.DefaultMutableFeatureGate.SetFromMap(rs.FeatureGates)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rs.DefaultFeatureGates = features.DefaultMutableFeatureGate
|
||||
|
||||
// loopbackClientConfig is a config for a privileged loopback connection
|
||||
var loopbackClientConfig *restclient.Config
|
||||
var secureServing *apiserver.SecureServingInfo
|
||||
if err := rs.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
|
||||
klog.ErrorS(err, "failed to apply secure server configuration")
|
||||
return err
|
||||
}
|
||||
|
||||
if secureServing != nil {
|
||||
secureServing.DisableHTTP2 = !rs.EnableHTTP2
|
||||
rs.SecureServingInfo = secureServing
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -22,20 +22,18 @@ import (
|
||||
"io"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"k8s.io/apiserver/pkg/server/healthz"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
apiserver "k8s.io/apiserver/pkg/server"
|
||||
"k8s.io/apiserver/pkg/server/healthz"
|
||||
"k8s.io/apiserver/pkg/server/mux"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/component-base/logs"
|
||||
logsapi "k8s.io/component-base/logs/api/v1"
|
||||
@@ -67,40 +65,16 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
// loopbackClientConfig is a config for a privileged loopback connection
|
||||
var loopbackClientConfig *restclient.Config
|
||||
var secureServing *apiserver.SecureServingInfo
|
||||
if err := s.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
|
||||
klog.ErrorS(err, "failed to apply secure server configuration")
|
||||
if err = s.Apply(); err != nil {
|
||||
klog.ErrorS(err, "failed to apply")
|
||||
return err
|
||||
}
|
||||
|
||||
secureServing.DisableHTTP2 = !s.EnableHTTP2
|
||||
|
||||
ctx, done := signal.NotifyContext(cmd.Context(), syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
|
||||
if !s.DisableMetrics {
|
||||
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
|
||||
}
|
||||
|
||||
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
|
||||
|
||||
stoppedCh, _, err := secureServing.Serve(pathRecorderMux, 0, ctx.Done())
|
||||
if err != nil {
|
||||
klog.Fatalf("failed to start secure server: %v", err)
|
||||
if err = Run(cmd.Context(), s); err != nil {
|
||||
klog.ErrorS(err, "failed to run descheduler server")
|
||||
return err
|
||||
}
|
||||
|
||||
if err = Run(ctx, s); err != nil {
|
||||
klog.ErrorS(err, "descheduler server")
|
||||
return err
|
||||
}
|
||||
|
||||
done()
|
||||
// wait for metrics server to close
|
||||
<-stoppedCh
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@@ -114,15 +88,52 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
err := tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
|
||||
func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
|
||||
ctx, done := signal.NotifyContext(rootCtx, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
|
||||
if !rs.DisableMetrics {
|
||||
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
|
||||
}
|
||||
|
||||
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
|
||||
|
||||
var stoppedCh <-chan struct{}
|
||||
var err error
|
||||
if rs.SecureServingInfo != nil {
|
||||
stoppedCh, _, err = rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
|
||||
if err != nil {
|
||||
klog.Fatalf("failed to start secure server: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "failed to create tracer provider")
|
||||
}
|
||||
defer tracing.Shutdown(ctx)
|
||||
defer func() {
|
||||
// we give the tracing.Shutdown() its own context as the
|
||||
// original context may have been cancelled already. we
|
||||
// have arbitrarily chosen the timeout duration.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
tracing.Shutdown(ctx)
|
||||
}()
|
||||
|
||||
// increase the fake watch channel so the dry-run mode can be run
|
||||
// over a cluster with thousands of pods
|
||||
watch.DefaultChanSize = 100000
|
||||
return descheduler.Run(ctx, rs)
|
||||
err = descheduler.Run(ctx, rs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
done()
|
||||
if stoppedCh != nil {
|
||||
// wait for metrics server to close
|
||||
<-stoppedCh
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -23,13 +23,17 @@ descheduler [flags]
|
||||
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
|
||||
--dry-run Execute descheduler in dry run mode.
|
||||
--enable-http2 If http/2 should be enabled for the metrics and health check
|
||||
--feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
|
||||
AllAlpha=true|false (ALPHA - default=false)
|
||||
AllBeta=true|false (BETA - default=false)
|
||||
EvictionsInBackground=true|false (ALPHA - default=false)
|
||||
-h, --help help for descheduler
|
||||
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
|
||||
--kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
|
||||
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
|
||||
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 2m17s)
|
||||
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. (default 1m47s)
|
||||
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
|
||||
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases'. (default "leases")
|
||||
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
|
||||
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
|
||||
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
|
||||
|
||||
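As a usage note for the new --feature-gates flag: when the descheduler is deployed with the Helm chart from this repository, command-line flags are driven by the cmdOptions map in values.yaml (rendered by the container args loop shown earlier in this diff). The exact rendering of keyed entries is an assumption here, so treat this as a sketch rather than a verified configuration.

```yaml
# Hedged sketch: enable the EvictionsInBackground alpha gate through the chart.
# Assumes cmdOptions entries with values are rendered as "--<key>=<value>".
cmdOptions:
  v: 3
  feature-gates: "EvictionsInBackground=true"
```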
@@ -3,7 +3,7 @@
|
||||
## Required Tools
|
||||
|
||||
- [Git](https://git-scm.com/downloads)
|
||||
- [Go 1.16+](https://golang.org/dl/)
|
||||
- [Go 1.23+](https://golang.org/dl/)
|
||||
- [Docker](https://docs.docker.com/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
|
||||
- [kind v0.10.0+](https://kind.sigs.k8s.io/)
|
||||
|
||||
@@ -26,7 +26,7 @@ When the above pre-release steps are complete and the release is ready to be cut
|
||||
3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
|
||||
4. Cut release branch from `master`, eg `release-1.24`
|
||||
5. Publish release using Github's release process from the git tag you created
|
||||
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
|
||||
6. Email `sig-scheduling@kubernetes.io` to announce the release
|
||||
|
||||
**Patch release**
|
||||
1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
|
||||
@@ -34,7 +34,7 @@ When the above pre-release steps are complete and the release is ready to be cut
|
||||
3. Merge Helm chart version update to release branch
|
||||
4. Perform the image promotion process for the patch version
|
||||
5. Publish release using Github's release process from the git tag you created
|
||||
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
|
||||
6. Email `sig-scheduling@kubernetes.io` to announce the release
|
||||
|
||||
### Flowchart
|
||||
|
||||
|
||||
@@ -4,12 +4,13 @@ Starting with descheduler release v0.10.0 container images are available in the
|
||||
|
||||
Descheduler Version | Container Image | Architectures |
|
||||
------------------- |-------------------------------------------------|-------------------------|
|
||||
v0.34.0 | registry.k8s.io/descheduler/descheduler:v0.34.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.33.0 | registry.k8s.io/descheduler/descheduler:v0.33.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
|
||||
Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
|
||||
starting with descheduler release v0.20.0 use the below process to download the official descheduler
|
||||
|
||||
158
go.mod
@@ -1,116 +1,138 @@
|
||||
module sigs.k8s.io/descheduler
|
||||
|
||||
go 1.22.5
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.3
|
||||
|
||||
godebug default=go1.24
|
||||
|
||||
require (
|
||||
github.com/client9/misspell v0.3.4
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
go.opentelemetry.io/otel v1.28.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
|
||||
go.opentelemetry.io/otel/sdk v1.28.0
|
||||
go.opentelemetry.io/otel/trace v1.28.0
|
||||
google.golang.org/grpc v1.65.0
|
||||
k8s.io/api v0.31.0
|
||||
k8s.io/apimachinery v0.31.0
|
||||
k8s.io/apiserver v0.31.0
|
||||
k8s.io/client-go v0.31.0
|
||||
k8s.io/code-generator v0.31.0
|
||||
k8s.io/component-base v0.31.0
|
||||
k8s.io/component-helpers v0.31.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/common v0.64.0
|
||||
github.com/spf13/cobra v1.10.0
|
||||
github.com/spf13/pflag v1.0.9
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
google.golang.org/grpc v1.72.2
|
||||
k8s.io/api v0.34.0
|
||||
k8s.io/apimachinery v0.34.0
|
||||
k8s.io/apiserver v0.34.0
|
||||
k8s.io/client-go v0.34.0
|
||||
k8s.io/code-generator v0.34.0
|
||||
k8s.io/component-base v0.34.0
|
||||
k8s.io/component-helpers v0.34.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||
k8s.io/metrics v0.34.0
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
|
||||
kubevirt.io/api v1.3.0
|
||||
kubevirt.io/client-go v1.3.0
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
|
||||
sigs.k8s.io/mdtoc v1.1.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
sigs.k8s.io/yaml v1.6.0
|
||||
)
|
||||
|
||||
require golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-kit/kit v0.13.0 // indirect
|
||||
github.com/go-kit/log v0.2.1 // indirect
|
||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.4 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/glog v1.2.4 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
|
||||
github.com/google/cel-go v0.20.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.26.0 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/imdario/mergo v0.3.6 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/jpillora/backoff v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
|
||||
github.com/openshift/custom-resource-status v1.1.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.19.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.2.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.14 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.14 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.28.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.6.4 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.6.4 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.6.4 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/net v0.26.0 // indirect
|
||||
golang.org/x/oauth2 v0.21.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/term v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
|
||||
google.golang.org/protobuf v1.34.2 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.38.0 // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.14.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
|
||||
k8s.io/kms v0.31.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.30.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
|
||||
k8s.io/kms v0.34.0 // indirect
|
||||
k8s.io/kube-openapi v0.30.0 // indirect
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
)
|
||||
|
||||
replace go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0
|
||||
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
|
||||
|
||||
723
go.sum
@@ -1,115 +1,191 @@
|
||||
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
||||
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
||||
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
|
||||
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
|
||||
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
|
||||
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
|
||||
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
|
||||
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
|
||||
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
|
||||
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
|
||||
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
|
||||
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
|
||||
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
|
||||
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
|
||||
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
|
||||
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
|
||||
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4=
github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v1.10.0 h1:a5/WeUlSDCvV5a45ljW2ZFtV0bTDpkfSAj3uqB6Sc+0=
github.com/spf13/cobra v1.10.0/go.mod h1:9dhySC7dnTtEiqzmqfkLj47BslqLCUPMXjG2lj/NgoE=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/spf13/pflag v1.0.8/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0=
go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU=
go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ=
go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI=
go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8=
go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg=
go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg=
go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk=
go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M=
go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0=
go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA=
go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw=
go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok=
go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo=
go.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk=
go.etcd.io/etcd/client/pkg/v3 v3.6.4 h1:9HBYrjppeOfFjBjaMTRxT3R7xT0GLK8EJMVC4xg6ok0=
go.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI=
go.etcd.io/etcd/client/v3 v3.6.4 h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A=
go.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo=
go.etcd.io/etcd/pkg/v3 v3.6.4 h1:fy8bmXIec1Q35/jRZ0KOes8vuFxbvdN0aAFqmEfJZWA=
go.etcd.io/etcd/pkg/v3 v3.6.4/go.mod h1:kKcYWP8gHuBRcteyv6MXWSN0+bVMnfgqiHueIZnKMtE=
go.etcd.io/etcd/server/v3 v3.6.4 h1:LsCA7CzjVt+8WGrdsnh6RhC0XqCsLkBly3ve5rTxMAU=
go.etcd.io/etcd/server/v3 v3.6.4/go.mod h1:aYCL/h43yiONOv0QIR82kH/2xZ7m+IWYjzRmyQfnCAg=
go.etcd.io/raft/v3 v3.6.0 h1:5NtvbDVYpnfZWcIHgGRk9DyzkBIXOi8j+DDp1IcnUWQ=
go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0=
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY=
k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk=
k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
k8s.io/code-generator v0.31.0 h1:w607nrMi1KeDKB3/F/J4lIoOgAwc+gV9ZKew4XRfMp8=
k8s.io/code-generator v0.31.0/go.mod h1:84y4w3es8rOJOUUP1rLsIiGlO1JuEaPFXQPA9e/K6U0=
k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs=
k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo=
k8s.io/component-helpers v0.31.0 h1:jyRUKA+GX+q19o81k4x94imjNICn+e6Gzi6T89va1/A=
k8s.io/component-helpers v0.31.0/go.mod h1:MrNIvT4iB7wXIseYSWfHUJB/aNUiFvbilp4qDfBQi6s=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
|
||||
k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE=
|
||||
k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug=
|
||||
k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs=
|
||||
k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y=
|
||||
k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||
k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0=
|
||||
k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
|
||||
k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg=
|
||||
k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ=
|
||||
k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo=
|
||||
k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY=
|
||||
k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
||||
k8s.io/code-generator v0.34.0 h1:Ze2i1QsvUprIlX3oHiGv09BFQRLCz+StA8qKwwFzees=
|
||||
k8s.io/code-generator v0.34.0/go.mod h1:Py2+4w2HXItL8CGhks8uI/wS3Y93wPKO/9mBQUYNua0=
|
||||
k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8=
|
||||
k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg=
|
||||
k8s.io/component-helpers v0.34.0 h1:5T7P9XGMoUy1JDNKzHf0p/upYbeUf8ZaSf9jbx0QlIo=
|
||||
k8s.io/component-helpers v0.34.0/go.mod h1:kaOyl5tdtnymriYcVZg4uwDBe2d1wlIpXyDkt6sVnt4=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.31.0 h1:KchILPfB1ZE+ka7223mpU5zeFNkmb45jl7RHnlImUaI=
|
||||
k8s.io/kms v0.31.0/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/kms v0.34.0 h1:u+/rcxQ3Jr7gC9AY5nXuEnBcGEB7ZOIJ9cdLdyHyEjQ=
|
||||
k8s.io/kms v0.34.0/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/metrics v0.34.0 h1:nYSfG2+tnL6/MRC2I+sGHjtNEGoEoM/KktgGOoQFwws=
|
||||
k8s.io/metrics v0.34.0/go.mod h1:KCuXmotE0v4AvoARKUP8NC4lUnbK/Du1mluGdor5h4M=
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
kubevirt.io/api v1.3.0 h1:9sGElMmnRU50pGED+MPPD2OwQl4S5lvjCUjm+t0mI90=
|
||||
kubevirt.io/api v1.3.0/go.mod h1:e6LkElYZZm8NcP2gKlFVHZS9pgNhIARHIjSBSfeiP1s=
|
||||
kubevirt.io/client-go v1.3.0 h1:/HKn4exzwsctEVTwVtEFaeT9D2v4TgWr2SmxITVEZ/4=
|
||||
kubevirt.io/client-go v1.3.0/go.mod h1:qmcJZvUjbmggY1pp7irO3zesBJj7wwGIWAdnYEoh3yc=
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1 h1:chmxuINvA7TPmIe8LpShCoKPxoegcKjkG9tYboFBs/U=
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs=
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc=
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
|
||||
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.2.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
|
||||
@@ -19,7 +19,7 @@
go::verify_version() {
  GO_VERSION=($(go version))

  if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.20|go1.21|go1.22') ]]; then
  if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.22|go1.23|go1.24') ]]; then
    echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
    exit 1
  fi

@@ -70,7 +70,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
    ret=1
  fi

  if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
  if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" -x "README*" vendor "${_deschedulertmp}/vendor")"; then
    echo "Your vendored results are different:" >&2
    echo "${_out}" >&2
    echo "Vendor Verify failed." >&2
@@ -22,13 +22,31 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
  resources: ["priorityclasses"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
  resources: ["poddisruptionbudgets"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["create"]
  verbs: ["create", "update"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  resourceNames: ["descheduler"]
  verbs: ["get", "patch", "delete"]
- apiGroups: ["metrics.k8s.io"]
  resources: ["nodes", "pods"]
  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "watch", "list"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: descheduler-role
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
@@ -48,3 +66,16 @@ subjects:
- name: descheduler-sa
  kind: ServiceAccount
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: descheduler-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: descheduler-role
subjects:
- name: descheduler-sa
  kind: ServiceAccount
  namespace: kube-system
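Editorial note: the widened lease verbs above line up with what client-go leader election needs, that is, create plus update for renewals on the named "descheduler" Lease, and get/patch/delete for takeover and cleanup. A minimal, hedged sketch of that wiring with client-go's resourcelock and leaderelection packages follows; the durations and the helper name are illustrative, only the Lease name and namespace mirror the RBAC above.

package example

import (
    "context"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
)

// runWithLeaderElection runs `run` only while holding the "descheduler" Lease
// in kube-system, the object the rules above grant access to.
func runWithLeaderElection(ctx context.Context, client clientset.Interface, id string, run func(context.Context)) {
    lock := &resourcelock.LeaseLock{
        LeaseMeta:  metav1.ObjectMeta{Name: "descheduler", Namespace: "kube-system"},
        Client:     client.CoordinationV1(),
        LockConfig: resourcelock.ResourceLockConfig{Identity: id},
    }
    leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
        Lock:          lock,
        LeaseDuration: 15 * time.Second,
        RenewDeadline: 10 * time.Second,
        RetryPeriod:   2 * time.Second,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: run,
            OnStoppedLeading: func() {}, // real code should exit or requeue here
        },
    })
}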
@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
  image: registry.k8s.io/descheduler/descheduler:v0.30.1
  image: registry.k8s.io/descheduler/descheduler:v0.34.0
  volumeMounts:
  - mountPath: /policy-dir
    name: policy-volume

@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
  image: registry.k8s.io/descheduler/descheduler:v0.30.1
  image: registry.k8s.io/descheduler/descheduler:v0.34.0
  imagePullPolicy: IfNotPresent
  command:
  - "/bin/descheduler"

@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
  image: registry.k8s.io/descheduler/descheduler:v0.30.1
  image: registry.k8s.io/descheduler/descheduler:v0.34.0
  volumeMounts:
  - mountPath: /policy-dir
    name: policy-volume
@@ -31,10 +31,18 @@ const (

var (
    PodsEvicted = metrics.NewCounterVec(
        &metrics.CounterOpts{
            Subsystem: DeschedulerSubsystem,
            Name: "pods_evicted",
            Help: "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
            StabilityLevel: metrics.ALPHA,
            DeprecatedVersion: "0.34.0",
        }, []string{"result", "strategy", "profile", "namespace", "node"})
    PodsEvictedTotal = metrics.NewCounterVec(
        &metrics.CounterOpts{
            Subsystem: DeschedulerSubsystem,
            Name: "pods_evicted",
            Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
            Name: "pods_evicted_total",
            Help: "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
            StabilityLevel: metrics.ALPHA,
        }, []string{"result", "strategy", "profile", "namespace", "node"})

@@ -49,18 +57,36 @@ var (
    )

    DeschedulerLoopDuration = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem: DeschedulerSubsystem,
            Name: "descheduler_loop_duration_seconds",
            Help: "Time taken to complete a full descheduling cycle",
            StabilityLevel: metrics.ALPHA,
            DeprecatedVersion: "0.34.0",
            Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
        }, []string{})
    LoopDuration = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem: DeschedulerSubsystem,
            Name: "descheduler_loop_duration_seconds",
            Name: "loop_duration_seconds",
            Help: "Time taken to complete a full descheduling cycle",
            StabilityLevel: metrics.ALPHA,
            Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
        }, []string{})

    DeschedulerStrategyDuration = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem: DeschedulerSubsystem,
            Name: "descheduler_strategy_duration_seconds",
            Help: "Time taken to complete Each strategy of the descheduling operation",
            StabilityLevel: metrics.ALPHA,
            DeprecatedVersion: "0.34.0",
            Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
        }, []string{"strategy", "profile"})
    StrategyDuration = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem: DeschedulerSubsystem,
            Name: "descheduler_strategy_duration_seconds",
            Name: "strategy_duration_seconds",
            Help: "Time taken to complete Each strategy of the descheduling operation",
            StabilityLevel: metrics.ALPHA,
            Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
@@ -68,9 +94,12 @@ var (

    metricsList = []metrics.Registerable{
        PodsEvicted,
        PodsEvictedTotal,
        buildInfo,
        DeschedulerLoopDuration,
        DeschedulerStrategyDuration,
        LoopDuration,
        StrategyDuration,
    }
)
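Editorial note: the hunk above keeps the old series (pods_evicted, descheduler_loop_duration_seconds, descheduler_strategy_duration_seconds) registered but marks them with DeprecatedVersion, while adding the renamed replacements. A self-contained sketch of the same deprecate-and-rename pattern using k8s.io/component-base/metrics follows; the names, help text, and label values are illustrative, not the descheduler's actual wiring.

package main

import (
    "k8s.io/component-base/metrics"
    "k8s.io/component-base/metrics/legacyregistry"
)

var (
    // oldCounter mimics a metric kept only for backward compatibility; once the
    // registry version reaches DeprecatedVersion it is reported as deprecated.
    oldCounter = metrics.NewCounterVec(
        &metrics.CounterOpts{
            Subsystem:         "descheduler",
            Name:              "pods_evicted",
            Help:              "Deprecated; use pods_evicted_total instead.",
            StabilityLevel:    metrics.ALPHA,
            DeprecatedVersion: "0.34.0",
        }, []string{"result", "strategy", "profile", "namespace", "node"})

    // newCounter is the renamed series dashboards should migrate to.
    newCounter = metrics.NewCounterVec(
        &metrics.CounterOpts{
            Subsystem:      "descheduler",
            Name:           "pods_evicted_total",
            Help:           "Number of total evicted pods.",
            StabilityLevel: metrics.ALPHA,
        }, []string{"result", "strategy", "profile", "namespace", "node"})
)

func main() {
    legacyregistry.MustRegister(oldCounter, newCounter)
    // During the transition window both series are incremented in lockstep.
    oldCounter.WithLabelValues("success", "LowNodeUtilization", "default", "demo-ns", "node-1").Inc()
    newCounter.WithLabelValues("success", "LowNodeUtilization", "default", "demo-ns", "node-1").Inc()
}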
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ package api
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
@@ -41,6 +42,23 @@ type DeschedulerPolicy struct {
|
||||
|
||||
// MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
|
||||
MaxNoOfPodsToEvictTotal *uint
|
||||
|
||||
// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
|
||||
// Default is false.
|
||||
EvictionFailureEventNotification *bool
|
||||
|
||||
// MetricsCollector configures collection of metrics about actual resource utilization
|
||||
// Deprecated. Use MetricsProviders field instead.
|
||||
MetricsCollector *MetricsCollector
|
||||
|
||||
// MetricsProviders configure collection of metrics about actual resource utilization from various sources
|
||||
MetricsProviders []MetricsProvider
|
||||
|
||||
// GracePeriodSeconds The duration in seconds before the object should be deleted. Value must be non-negative integer.
|
||||
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
|
||||
// specified type will be used.
|
||||
// Defaults to a per object value if not specified. zero means delete immediately.
|
||||
GracePeriodSeconds *int64
|
||||
}
|
||||
|
||||
// Namespaces carries a list of included/excluded namespaces
|
||||
@@ -50,6 +68,12 @@ type Namespaces struct {
|
||||
Exclude []string `json:"exclude,omitempty"`
|
||||
}
|
||||
|
||||
// EvictionLimits limits the number of evictions per domain. E.g. node, namespace, total.
|
||||
type EvictionLimits struct {
|
||||
// node restricts the maximum number of evictions per node
|
||||
Node *uint `json:"node,omitempty"`
|
||||
}
|
||||
|
||||
type (
|
||||
Percentage float64
|
||||
ResourceThresholds map[v1.ResourceName]Percentage
|
||||
@@ -84,3 +108,55 @@ type PluginSet struct {
|
||||
Enabled []string
|
||||
Disabled []string
|
||||
}
|
||||
|
||||
type MetricsSource string
|
||||
|
||||
const (
|
||||
// KubernetesMetrics enables metrics from a Kubernetes metrics server.
|
||||
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
|
||||
KubernetesMetrics MetricsSource = "KubernetesMetrics"
|
||||
|
||||
// PrometheusMetrics enables metrics from a Prometheus server.
|
||||
PrometheusMetrics MetricsSource = "Prometheus"
|
||||
)
|
||||
|
||||
// MetricsCollector configures collection of metrics about actual resource utilization
|
||||
type MetricsCollector struct {
|
||||
// Enabled metrics collection from Kubernetes metrics.
|
||||
// Deprecated. Use MetricsProvider.Source field instead.
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
// MetricsProvider configures collection of metrics about actual resource utilization from a given source
|
||||
type MetricsProvider struct {
|
||||
// Source enables metrics from Kubernetes metrics server.
|
||||
Source MetricsSource
|
||||
|
||||
// Prometheus enables metrics collection through Prometheus
|
||||
Prometheus *Prometheus
|
||||
}
|
||||
|
||||
// ReferencedResourceList is an adaption of v1.ResourceList with resources as references
|
||||
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity
|
||||
|
||||
type Prometheus struct {
|
||||
URL string
|
||||
// authToken used for authentication with the prometheus server.
|
||||
// If not set the in cluster authentication token for the descheduler service
|
||||
// account is read from the container's file system.
|
||||
AuthToken *AuthToken
|
||||
}
|
||||
|
||||
type AuthToken struct {
|
||||
// secretReference references an authentication token.
|
||||
// secrets are expected to be created under the descheduler's namespace.
|
||||
SecretReference *SecretReference
|
||||
}
|
||||
|
||||
// SecretReference holds a reference to a Secret
|
||||
type SecretReference struct {
|
||||
// namespace is the namespace of the secret.
|
||||
Namespace string
|
||||
// name is the name of the secret.
|
||||
Name string
|
||||
}
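Editorial note: the types above wire a Prometheus-backed utilization source into the policy. A rough sketch of how they compose, assuming the internal sigs.k8s.io/descheduler/pkg/api package shown in this hunk; the URL and secret names are placeholders.

package example

import "sigs.k8s.io/descheduler/pkg/api"

// examplePolicy shows only the MetricsProviders -> Prometheus -> AuthToken ->
// SecretReference chain; profiles and eviction limits are omitted for brevity.
func examplePolicy() *api.DeschedulerPolicy {
    notify := true
    return &api.DeschedulerPolicy{
        EvictionFailureEventNotification: &notify,
        MetricsProviders: []api.MetricsProvider{
            {
                Source: api.PrometheusMetrics,
                Prometheus: &api.Prometheus{
                    URL: "https://prometheus.example.svc:9091", // placeholder
                    AuthToken: &api.AuthToken{
                        SecretReference: &api.SecretReference{
                            Namespace: "kube-system",
                            Name:      "descheduler-prometheus-token", // placeholder
                        },
                    },
                },
            },
        },
    }
}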
|
||||
|
||||
@@ -40,6 +40,23 @@ type DeschedulerPolicy struct {
|
||||
|
||||
// MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
|
||||
MaxNoOfPodsToEvictTotal *uint `json:"maxNoOfPodsToEvictTotal,omitempty"`
|
||||
|
||||
// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
|
||||
// Default is false.
|
||||
EvictionFailureEventNotification *bool `json:"evictionFailureEventNotification,omitempty"`
|
||||
|
||||
// MetricsCollector configures collection of metrics for actual resource utilization
|
||||
// Deprecated. Use MetricsProviders field instead.
|
||||
MetricsCollector *MetricsCollector `json:"metricsCollector,omitempty"`
|
||||
|
||||
// MetricsProviders configure collection of metrics about actual resource utilization from various sources
|
||||
MetricsProviders []MetricsProvider `json:"metricsProviders,omitempty"`
|
||||
|
||||
// GracePeriodSeconds The duration in seconds before the object should be deleted. Value must be non-negative integer.
|
||||
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
|
||||
// specified type will be used.
|
||||
// Defaults to a per object value if not specified. zero means delete immediately.
|
||||
GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
|
||||
}
|
||||
|
||||
type DeschedulerProfile struct {
|
||||
@@ -66,3 +83,52 @@ type PluginSet struct {
|
||||
Enabled []string `json:"enabled"`
|
||||
Disabled []string `json:"disabled"`
|
||||
}
|
||||
|
||||
type MetricsSource string
|
||||
|
||||
const (
|
||||
// KubernetesMetrics enables metrics from a Kubernetes metrics server.
|
||||
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
|
||||
KubernetesMetrics MetricsSource = "KubernetesMetrics"
|
||||
|
||||
// PrometheusMetrics enables metrics from a Prometheus server.
|
||||
PrometheusMetrics MetricsSource = "Prometheus"
|
||||
)
|
||||
|
||||
// MetricsCollector configures collection of metrics about actual resource utilization
|
||||
type MetricsCollector struct {
|
||||
// Enabled metrics collection from Kubernetes metrics server.
|
||||
// Deprecated. Use MetricsProvider.Source field instead.
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
}
|
||||
|
||||
// MetricsProvider configures collection of metrics about actual resource utilization from a given source
|
||||
type MetricsProvider struct {
|
||||
// Source enables metrics from Kubernetes metrics server.
|
||||
Source MetricsSource `json:"source,omitempty"`
|
||||
|
||||
// Prometheus enables metrics collection through Prometheus
|
||||
Prometheus *Prometheus `json:"prometheus,omitempty"`
|
||||
}
|
||||
|
||||
type Prometheus struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
// authToken used for authentication with the prometheus server.
|
||||
// If not set the in cluster authentication token for the descheduler service
|
||||
// account is read from the container's file system.
|
||||
AuthToken *AuthToken `json:"authToken,omitempty"`
|
||||
}
|
||||
|
||||
type AuthToken struct {
|
||||
// secretReference references an authentication token.
|
||||
// secrets are expected to be created under the descheduler's namespace.
|
||||
SecretReference *SecretReference `json:"secretReference,omitempty"`
|
||||
}
|
||||
|
||||
// SecretReference holds a reference to a Secret
|
||||
type SecretReference struct {
|
||||
// namespace is the namespace of the secret.
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
// name is the name of the secret.
|
||||
Name string `json:"name,omitempty"`
|
||||
}
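Editorial note: because the v1alpha2 fields carry JSON tags, the same configuration round-trips through YAML. A small sketch of decoding such a policy with sigs.k8s.io/yaml into the versioned struct; the URL and secret values are placeholders.

package example

import (
    "fmt"

    "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
    "sigs.k8s.io/yaml"
)

const policyYAML = `
apiVersion: descheduler/v1alpha2
kind: DeschedulerPolicy
evictionFailureEventNotification: true
metricsProviders:
- source: Prometheus
  prometheus:
    url: https://prometheus.example.svc:9091
    authToken:
      secretReference:
        namespace: kube-system
        name: descheduler-prometheus-token
`

// decodePolicy unmarshals the YAML above into the versioned type using the
// JSON tags defined in this hunk.
func decodePolicy() (*v1alpha2.DeschedulerPolicy, error) {
    policy := &v1alpha2.DeschedulerPolicy{}
    if err := yaml.Unmarshal([]byte(policyYAML), policy); err != nil {
        return nil, fmt.Errorf("decoding policy: %w", err)
    }
    return policy, nil
}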
|
||||
|
||||
pkg/api/v1alpha2/zz_generated.conversion.go (generated; 166 lines changed)
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -36,6 +36,16 @@ func init() {
|
||||
// RegisterConversions adds conversion functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
func RegisterConversions(s *runtime.Scheme) error {
|
||||
if err := s.AddGeneratedConversionFunc((*AuthToken)(nil), (*api.AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_AuthToken_To_api_AuthToken(a.(*AuthToken), b.(*api.AuthToken), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.AuthToken)(nil), (*AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_AuthToken_To_v1alpha2_AuthToken(a.(*api.AuthToken), b.(*AuthToken), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*DeschedulerProfile)(nil), (*api.DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(a.(*DeschedulerProfile), b.(*api.DeschedulerProfile), scope)
|
||||
}); err != nil {
|
||||
@@ -46,6 +56,26 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*MetricsCollector)(nil), (*api.MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(a.(*MetricsCollector), b.(*api.MetricsCollector), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.MetricsCollector)(nil), (*MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(a.(*api.MetricsCollector), b.(*MetricsCollector), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*MetricsProvider)(nil), (*api.MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(a.(*MetricsProvider), b.(*api.MetricsProvider), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.MetricsProvider)(nil), (*MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(a.(*api.MetricsProvider), b.(*MetricsProvider), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.PluginConfig)(nil), (*PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_PluginConfig_To_v1alpha2_PluginConfig(a.(*api.PluginConfig), b.(*PluginConfig), scope)
|
||||
}); err != nil {
|
||||
@@ -71,6 +101,26 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*Prometheus)(nil), (*api.Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_Prometheus_To_api_Prometheus(a.(*Prometheus), b.(*api.Prometheus), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.Prometheus)(nil), (*Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_Prometheus_To_v1alpha2_Prometheus(a.(*api.Prometheus), b.(*Prometheus), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*SecretReference)(nil), (*api.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_SecretReference_To_api_SecretReference(a.(*SecretReference), b.(*api.SecretReference), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.SecretReference)(nil), (*SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_SecretReference_To_v1alpha2_SecretReference(a.(*api.SecretReference), b.(*SecretReference), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
|
||||
}); err != nil {
|
||||
@@ -89,6 +139,26 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
|
||||
out.SecretReference = (*api.SecretReference)(unsafe.Pointer(in.SecretReference))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_AuthToken_To_api_AuthToken is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
|
||||
out.SecretReference = (*SecretReference)(unsafe.Pointer(in.SecretReference))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_AuthToken_To_v1alpha2_AuthToken is an autogenerated conversion function.
|
||||
func Convert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
|
||||
return autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
|
||||
if in.Profiles != nil {
|
||||
in, out := &in.Profiles, &out.Profiles
|
||||
@@ -105,6 +175,10 @@ func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
|
||||
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
|
||||
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
|
||||
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
|
||||
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
|
||||
out.MetricsCollector = (*api.MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
|
||||
out.MetricsProviders = *(*[]api.MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
|
||||
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -124,6 +198,10 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.Des
|
||||
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
|
||||
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
|
||||
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
|
||||
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
|
||||
out.MetricsCollector = (*MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
|
||||
out.MetricsProviders = *(*[]MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
|
||||
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -175,6 +253,48 @@ func Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.Desch
|
||||
return autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
|
||||
out.Enabled = in.Enabled
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
|
||||
out.Enabled = in.Enabled
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector is an autogenerated conversion function.
|
||||
func Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
|
||||
return autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
|
||||
out.Source = api.MetricsSource(in.Source)
|
||||
out.Prometheus = (*api.Prometheus)(unsafe.Pointer(in.Prometheus))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
|
||||
out.Source = MetricsSource(in.Source)
|
||||
out.Prometheus = (*Prometheus)(unsafe.Pointer(in.Prometheus))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider is an autogenerated conversion function.
|
||||
func Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
|
||||
return autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
|
||||
out.Name = in.Name
|
||||
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
|
||||
@@ -271,3 +391,47 @@ func autoConvert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins,
|
||||
func Convert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins, s conversion.Scope) error {
|
||||
return autoConvert_api_Plugins_To_v1alpha2_Plugins(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
|
||||
out.URL = in.URL
|
||||
out.AuthToken = (*api.AuthToken)(unsafe.Pointer(in.AuthToken))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_Prometheus_To_api_Prometheus is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
|
||||
out.URL = in.URL
|
||||
out.AuthToken = (*AuthToken)(unsafe.Pointer(in.AuthToken))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_Prometheus_To_v1alpha2_Prometheus is an autogenerated conversion function.
|
||||
func Convert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
|
||||
return autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
|
||||
out.Namespace = in.Namespace
|
||||
out.Name = in.Name
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_SecretReference_To_api_SecretReference is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
|
||||
out.Namespace = in.Namespace
|
||||
out.Name = in.Name
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_SecretReference_To_v1alpha2_SecretReference is an autogenerated conversion function.
|
||||
func Convert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
|
||||
return autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in, out, s)
|
||||
}
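Editorial note: the generated conversions are normally consumed through a runtime.Scheme rather than called one by one. A hedged sketch of converting a versioned policy into the internal type via the RegisterConversions function shown above; error handling is kept minimal.

package example

import (
    "k8s.io/apimachinery/pkg/runtime"

    "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
)

// convertPolicy registers the generated conversions on a fresh scheme and then
// lets the scheme pick the right Convert_* function for the policy types.
func convertPolicy(in *v1alpha2.DeschedulerPolicy) (*api.DeschedulerPolicy, error) {
    scheme := runtime.NewScheme()
    if err := v1alpha2.RegisterConversions(scheme); err != nil {
        return nil, err
    }
    out := &api.DeschedulerPolicy{}
    if err := scheme.Convert(in, out, nil); err != nil {
        return nil, err
    }
    return out, nil
}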
|
||||
|
||||
pkg/api/v1alpha2/zz_generated.deepcopy.go (generated; 119 lines changed)
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -25,6 +25,27 @@ import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
|
||||
*out = *in
|
||||
if in.SecretReference != nil {
|
||||
in, out := &in.SecretReference, &out.SecretReference
|
||||
*out = new(SecretReference)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
|
||||
func (in *AuthToken) DeepCopy() *AuthToken {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AuthToken)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = *in
|
||||
@@ -56,6 +77,28 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictionFailureEventNotification != nil {
|
||||
in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.MetricsCollector != nil {
|
||||
in, out := &in.MetricsCollector, &out.MetricsCollector
|
||||
*out = new(MetricsCollector)
|
||||
**out = **in
|
||||
}
|
||||
if in.MetricsProviders != nil {
|
||||
in, out := &in.MetricsProviders, &out.MetricsProviders
|
||||
*out = make([]MetricsProvider, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.GracePeriodSeconds != nil {
|
||||
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -101,6 +144,43 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
|
||||
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MetricsCollector)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
|
||||
*out = *in
|
||||
if in.Prometheus != nil {
|
||||
in, out := &in.Prometheus, &out.Prometheus
|
||||
*out = new(Prometheus)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
|
||||
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MetricsProvider)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
|
||||
*out = *in
|
||||
@@ -165,3 +245,40 @@ func (in *Plugins) DeepCopy() *Plugins {
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
|
||||
*out = *in
|
||||
if in.AuthToken != nil {
|
||||
in, out := &in.AuthToken, &out.AuthToken
|
||||
*out = new(AuthToken)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
|
||||
func (in *Prometheus) DeepCopy() *Prometheus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Prometheus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
|
||||
func (in *SecretReference) DeepCopy() *SecretReference {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SecretReference)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
pkg/api/v1alpha2/zz_generated.defaults.go (generated; 2 lines changed)
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
pkg/api/zz_generated.deepcopy.go (generated; 140 lines changed)
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -25,6 +25,27 @@ import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
|
||||
*out = *in
|
||||
if in.SecretReference != nil {
|
||||
in, out := &in.SecretReference, &out.SecretReference
|
||||
*out = new(SecretReference)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
|
||||
func (in *AuthToken) DeepCopy() *AuthToken {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AuthToken)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = *in
|
||||
@@ -56,6 +77,28 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictionFailureEventNotification != nil {
|
||||
in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.MetricsCollector != nil {
|
||||
in, out := &in.MetricsCollector, &out.MetricsCollector
|
||||
*out = new(MetricsCollector)
|
||||
**out = **in
|
||||
}
|
||||
if in.MetricsProviders != nil {
|
||||
in, out := &in.MetricsProviders, &out.MetricsProviders
|
||||
*out = make([]MetricsProvider, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.GracePeriodSeconds != nil {
|
||||
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -101,6 +144,64 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EvictionLimits) DeepCopyInto(out *EvictionLimits) {
|
||||
*out = *in
|
||||
if in.Node != nil {
|
||||
in, out := &in.Node, &out.Node
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvictionLimits.
|
||||
func (in *EvictionLimits) DeepCopy() *EvictionLimits {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EvictionLimits)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
|
||||
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MetricsCollector)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
|
||||
*out = *in
|
||||
if in.Prometheus != nil {
|
||||
in, out := &in.Prometheus, &out.Prometheus
|
||||
*out = new(Prometheus)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
|
||||
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MetricsProvider)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
|
||||
*out = *in
|
||||
@@ -215,6 +316,27 @@ func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
|
||||
*out = *in
|
||||
if in.AuthToken != nil {
|
||||
in, out := &in.AuthToken, &out.AuthToken
|
||||
*out = new(AuthToken)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
|
||||
func (in *Prometheus) DeepCopy() *Prometheus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Prometheus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
|
||||
{
|
||||
@@ -236,3 +358,19 @@ func (in ResourceThresholds) DeepCopy() ResourceThresholds {
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
|
||||
func (in *SecretReference) DeepCopy() *SecretReference {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SecretReference)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
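Editorial note: several of the new fields are pointers or slices, so plain struct assignment would share memory between callers; the generated DeepCopy is the safe way to hand a policy to concurrently running code. A tiny illustrative helper:

package example

import "sigs.k8s.io/descheduler/pkg/api"

// withExtraProvider returns a copy of the policy with one more metrics
// provider appended, leaving the shared original untouched.
func withExtraProvider(policy *api.DeschedulerPolicy, p api.MetricsProvider) *api.DeschedulerPolicy {
    clone := policy.DeepCopy()
    clone.MetricsProviders = append(clone.MetricsProviders, p)
    return clone
}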
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -17,18 +17,31 @@ limitations under the License.
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
"github.com/prometheus/common/config"
|
||||
|
||||
// Ensure to load all auth plugins.
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/transport"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
)
|
||||
|
||||
func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
|
||||
var K8sPodCAFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
|
||||
|
||||
func createConfig(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (*rest.Config, error) {
|
||||
var cfg *rest.Config
|
||||
if len(clientConnection.Kubeconfig) != 0 {
|
||||
master, err := GetMasterFromKubeconfig(clientConnection.Kubeconfig)
|
||||
@@ -56,9 +69,28 @@ func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfigura
|
||||
cfg = rest.AddUserAgent(cfg, userAgt)
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
|
||||
cfg, err := createConfig(clientConnection, userAgt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create config: %v", err)
|
||||
}
|
||||
|
||||
return clientset.NewForConfig(cfg)
|
||||
}
|
||||
|
||||
func CreateMetricsClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (metricsclient.Interface, error) {
|
||||
cfg, err := createConfig(clientConnection, userAgt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create config: %v", err)
|
||||
}
|
||||
|
||||
// Create the metrics clientset to access the metrics.k8s.io API
|
||||
return metricsclient.NewForConfig(cfg)
|
||||
}
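Editorial note: with createConfig factored out, the kube client and the metrics client share one ClientConnectionConfiguration. A short usage sketch of the two constructors defined above; the kubeconfig path, QPS/burst values, and user agent are placeholders.

package example

import (
    clientset "k8s.io/client-go/kubernetes"
    componentbaseconfig "k8s.io/component-base/config"
    metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"

    "sigs.k8s.io/descheduler/pkg/descheduler/client"
)

// buildClients creates both clientsets from the same connection settings.
func buildClients() (clientset.Interface, metricsclient.Interface, error) {
    conn := componentbaseconfig.ClientConnectionConfiguration{
        Kubeconfig: "/etc/kubernetes/descheduler.kubeconfig", // placeholder path
        QPS:        50,
        Burst:      100,
    }
    kubeClient, err := client.CreateClient(conn, "descheduler")
    if err != nil {
        return nil, nil, err
    }
    metricsClient, err := client.CreateMetricsClient(conn, "descheduler")
    if err != nil {
        return nil, nil, err
    }
    return kubeClient, metricsClient, nil
}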
|
||||
|
||||
func GetMasterFromKubeconfig(filename string) (string, error) {
|
||||
config, err := clientcmd.LoadFromFile(filename)
|
||||
if err != nil {
|
||||
@@ -75,3 +107,61 @@ func GetMasterFromKubeconfig(filename string) (string, error) {
|
||||
}
|
||||
return "", fmt.Errorf("failed to get master address from kubeconfig: cluster information not found")
|
||||
}
|
||||
|
||||
func loadCAFile(filepath string) (*x509.CertPool, error) {
|
||||
caCert, err := ioutil.ReadFile(filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
caCertPool := x509.NewCertPool()
|
||||
if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
|
||||
return nil, fmt.Errorf("failed to append CA certificate to the pool")
|
||||
}
|
||||
|
||||
return caCertPool, nil
|
||||
}
|
||||
|
||||
func CreatePrometheusClient(prometheusURL, authToken string) (promapi.Client, *http.Transport, error) {
|
||||
// Retrieve Pod CA cert
|
||||
caCertPool, err := loadCAFile(K8sPodCAFilePath)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Error loading CA file: %v", err)
|
||||
}
|
||||
|
||||
// Get Prometheus Host
|
||||
u, err := url.Parse(prometheusURL)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Error parsing prometheus URL: %v", err)
|
||||
}
|
||||
t := &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).DialContext,
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
TLSClientConfig: &tls.Config{
|
||||
RootCAs: caCertPool,
|
||||
ServerName: u.Host,
|
||||
},
|
||||
}
|
||||
roundTripper := transport.NewBearerAuthRoundTripper(
|
||||
authToken,
|
||||
t,
|
||||
)
|
||||
|
||||
if authToken != "" {
|
||||
client, err := promapi.NewClient(promapi.Config{
|
||||
Address: prometheusURL,
|
||||
RoundTripper: config.NewAuthorizationCredentialsRoundTripper("Bearer", config.NewInlineSecret(authToken), roundTripper),
|
||||
})
|
||||
return client, t, err
|
||||
}
|
||||
client, err := promapi.NewClient(promapi.Config{
|
||||
Address: prometheusURL,
|
||||
})
|
||||
return client, t, err
|
||||
}
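Editorial note: once CreatePrometheusClient returns, queries go through the standard client_golang v1 API. A hedged usage sketch follows; the URL and the PromQL expression are placeholders, and the token would normally come from the mounted service-account token or the referenced secret rather than a function argument.

package example

import (
    "context"
    "fmt"
    "time"

    promv1 "github.com/prometheus/client_golang/api/prometheus/v1"

    "sigs.k8s.io/descheduler/pkg/descheduler/client"
)

// queryNodeUtilization issues one instant query against the Prometheus
// endpoint using the client and transport built above.
func queryNodeUtilization(ctx context.Context, token string) error {
    promClient, transport, err := client.CreatePrometheusClient("https://prometheus.example.svc:9091", token)
    if err != nil {
        return err
    }
    defer transport.CloseIdleConnections()

    api := promv1.NewAPI(promClient)
    ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()

    // Placeholder query; the descheduler's actual query is configured elsewhere.
    result, warnings, err := api.Query(ctx, `instance:node_cpu:rate:sum`, time.Now())
    if err != nil {
        return err
    }
    if len(warnings) > 0 {
        fmt.Println("query warnings:", warnings)
    }
    fmt.Println(result)
    return nil
}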
|
||||
|
@@ -20,46 +20,60 @@ import (
	"context"
	"fmt"
	"math"
	"net/http"
	"strconv"
	"time"

	promapi "github.com/prometheus/client_golang/api"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/tools/events"
	componentbaseconfig "k8s.io/component-base/config"
	"k8s.io/klog/v2"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	policyv1 "k8s.io/api/policy/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	utilversion "k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	listersv1 "k8s.io/client-go/listers/core/v1"
	schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
	corev1listers "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/rest"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/descheduler/client"
	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	"sigs.k8s.io/descheduler/pkg/tracing"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/pkg/version"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/events"
	"k8s.io/client-go/util/workqueue"
	componentbaseconfig "k8s.io/component-base/config"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
	"sigs.k8s.io/descheduler/metrics"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/client"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
	"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
	frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile"
	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
	"sigs.k8s.io/descheduler/pkg/tracing"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/pkg/version"
)

const (
	prometheusAuthTokenSecretKey = "prometheusAuthToken"
	workQueueKey                 = "key"
)
|
||||
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
@@ -70,56 +84,265 @@ type profileRunner struct {
}

type descheduler struct {
	rs *options.DeschedulerServer
	podLister listersv1.PodLister
	nodeLister listersv1.NodeLister
	namespaceLister listersv1.NamespaceLister
	priorityClassLister schedulingv1.PriorityClassLister
	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
	sharedInformerFactory informers.SharedInformerFactory
	deschedulerPolicy *api.DeschedulerPolicy
	eventRecorder events.EventRecorder
	podEvictor *evictions.PodEvictor
	podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
	rs *options.DeschedulerServer
	ir *informerResources
	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
	sharedInformerFactory informers.SharedInformerFactory
	namespacedSecretsLister corev1listers.SecretNamespaceLister
	deschedulerPolicy *api.DeschedulerPolicy
	eventRecorder events.EventRecorder
	podEvictor *evictions.PodEvictor
	podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
	metricsCollector *metricscollector.MetricsCollector
	prometheusClient promapi.Client
	previousPrometheusClientTransport *http.Transport
	queue workqueue.RateLimitingInterface
	currentPrometheusAuthToken string
	metricsProviders map[api.MetricsSource]*api.MetricsProvider
}

func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
type informerResources struct {
	sharedInformerFactory informers.SharedInformerFactory
	resourceToInformer map[schema.GroupVersionResource]informers.GenericInformer
}

func newInformerResources(sharedInformerFactory informers.SharedInformerFactory) *informerResources {
	return &informerResources{
		sharedInformerFactory: sharedInformerFactory,
		resourceToInformer:    make(map[schema.GroupVersionResource]informers.GenericInformer),
	}
}

func (ir *informerResources) Uses(resources ...schema.GroupVersionResource) error {
	for _, resource := range resources {
		informer, err := ir.sharedInformerFactory.ForResource(resource)
		if err != nil {
			return err
		}

		ir.resourceToInformer[resource] = informer
	}
	return nil
}

// CopyTo copies informer subscriptions to the new factory and objects to the fake client so that the backing caches are populated when listers are used.
func (ir *informerResources) CopyTo(fakeClient *fakeclientset.Clientset, newFactory informers.SharedInformerFactory) error {
	for resource, informer := range ir.resourceToInformer {
		_, err := newFactory.ForResource(resource)
		if err != nil {
			return fmt.Errorf("error getting resource %s: %w", resource, err)
		}

		objects, err := informer.Lister().List(labels.Everything())
		if err != nil {
			return fmt.Errorf("error listing %s: %w", informer, err)
		}

		for _, object := range objects {
			fakeClient.Tracker().Add(object)
		}
	}
	return nil
}

func metricsProviderListToMap(providersList []api.MetricsProvider) map[api.MetricsSource]*api.MetricsProvider {
	providersMap := make(map[api.MetricsSource]*api.MetricsProvider)
	for _, provider := range providersList {
		providersMap[provider.Source] = &provider
	}
	return providersMap
}
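As the CopyTo comment above suggests, the dry-run path mirrors the live informers into a fake clientset. A condensed, illustrative fragment of how that plumbing fits together (it reuses the unexported informerResources type and helpers from this file and is not a standalone program):

```go
// Track the resources the plugins rely on while the real factory is being built...
ir := newInformerResources(sharedInformerFactory)
if err := ir.Uses(
	v1.SchemeGroupVersion.WithResource("pods"),
	v1.SchemeGroupVersion.WithResource("nodes"),
); err != nil {
	return nil, err
}

// ...and later, in the dry-run branch, seed a fake client and a fresh factory from it.
fakeClient := fakeclientset.NewSimpleClientset()
fakeFactory := informers.NewSharedInformerFactory(fakeClient, 0)
if err := ir.CopyTo(fakeClient, fakeFactory); err != nil {
	return nil, err
}
```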
||||
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory, namespacedSharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
|
||||
priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
|
||||
|
||||
ir := newInformerResources(sharedInformerFactory)
|
||||
ir.Uses(v1.SchemeGroupVersion.WithResource("pods"),
|
||||
v1.SchemeGroupVersion.WithResource("nodes"),
|
||||
// Future work could be to let each plugin declare what type of resources it needs; that way dry runs would stay
|
||||
// consistent with the real runs without having to keep the list here in sync.
|
||||
v1.SchemeGroupVersion.WithResource("namespaces"), // Used by the defaultevictor plugin
|
||||
schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"), // Used by the defaultevictor plugin
|
||||
policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin
|
||||
v1.SchemeGroupVersion.WithResource("persistentvolumeclaims"), // Used by the defaultevictor plugin
|
||||
) // Used by the defaultevictor plugin
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
nil,
|
||||
podEvictor, err := evictions.NewPodEvictor(
|
||||
ctx,
|
||||
rs.Client,
|
||||
eventRecorder,
|
||||
podInformer,
|
||||
rs.DefaultFeatureGates,
|
||||
evictions.NewOptions().
|
||||
WithPolicyGroupVersion(evictionPolicyGroupVersion).
|
||||
WithMaxPodsToEvictPerNode(deschedulerPolicy.MaxNoOfPodsToEvictPerNode).
|
||||
WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
|
||||
WithMaxPodsToEvictTotal(deschedulerPolicy.MaxNoOfPodsToEvictTotal).
|
||||
WithEvictionFailureEventNotification(deschedulerPolicy.EvictionFailureEventNotification).
|
||||
WithGracePeriodSeconds(deschedulerPolicy.GracePeriodSeconds).
|
||||
WithDryRun(rs.DryRun).
|
||||
WithMetricsEnabled(!rs.DisableMetrics),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &descheduler{
|
||||
desch := &descheduler{
|
||||
rs: rs,
|
||||
podLister: podLister,
|
||||
nodeLister: nodeLister,
|
||||
namespaceLister: namespaceLister,
|
||||
priorityClassLister: priorityClassLister,
|
||||
ir: ir,
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
sharedInformerFactory: sharedInformerFactory,
|
||||
deschedulerPolicy: deschedulerPolicy,
|
||||
eventRecorder: eventRecorder,
|
||||
podEvictor: podEvictor,
|
||||
podEvictionReactionFnc: podEvictionReactionFnc,
|
||||
}, nil
|
||||
prometheusClient: rs.PrometheusClient,
|
||||
queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: "descheduler"}),
|
||||
metricsProviders: metricsProviderListToMap(deschedulerPolicy.MetricsProviders),
|
||||
}
|
||||
|
||||
if rs.MetricsClient != nil {
|
||||
nodeSelector := labels.Everything()
|
||||
if deschedulerPolicy.NodeSelector != nil {
|
||||
sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeSelector = sel
|
||||
}
|
||||
desch.metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
|
||||
}
|
||||
|
||||
prometheusProvider := desch.metricsProviders[api.PrometheusMetrics]
|
||||
if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.AuthToken != nil {
|
||||
authTokenSecret := prometheusProvider.Prometheus.AuthToken.SecretReference
|
||||
if authTokenSecret == nil || authTokenSecret.Namespace == "" {
|
||||
return nil, fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
|
||||
}
|
||||
if namespacedSharedInformerFactory == nil {
|
||||
return nil, fmt.Errorf("namespacedSharedInformerFactory not configured")
|
||||
}
|
||||
namespacedSharedInformerFactory.Core().V1().Secrets().Informer().AddEventHandler(desch.eventHandler())
|
||||
desch.namespacedSecretsLister = namespacedSharedInformerFactory.Core().V1().Secrets().Lister().Secrets(authTokenSecret.Namespace)
|
||||
}
|
||||
|
||||
return desch, nil
|
||||
}
|
||||
|
||||
func (d *descheduler) reconcileInClusterSAToken() error {
|
||||
// Read the sa token and assume it has sufficient permissions to authenticate
|
||||
cfg, err := rest.InClusterConfig()
|
||||
if err == nil {
|
||||
if d.currentPrometheusAuthToken != cfg.BearerToken {
|
||||
klog.V(2).Infof("Creating Prometheus client (with SA token)")
|
||||
prometheusClient, transport, err := client.CreatePrometheusClient(d.metricsProviders[api.PrometheusMetrics].Prometheus.URL, cfg.BearerToken)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create a prometheus client: %v", err)
|
||||
}
|
||||
d.prometheusClient = prometheusClient
|
||||
if d.previousPrometheusClientTransport != nil {
|
||||
d.previousPrometheusClientTransport.CloseIdleConnections()
|
||||
}
|
||||
d.previousPrometheusClientTransport = transport
|
||||
d.currentPrometheusAuthToken = cfg.BearerToken
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err == rest.ErrNotInCluster {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unexpected error when reading in cluster config: %v", err)
|
||||
}
|
||||
|
||||
func (d *descheduler) runAuthenticationSecretReconciler(ctx context.Context) {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer d.queue.ShutDown()
|
||||
|
||||
klog.Infof("Starting authentication secret reconciler")
|
||||
defer klog.Infof("Shutting down authentication secret reconciler")
|
||||
|
||||
go wait.UntilWithContext(ctx, d.runAuthenticationSecretReconcilerWorker, time.Second)
|
||||
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
||||
func (d *descheduler) runAuthenticationSecretReconcilerWorker(ctx context.Context) {
|
||||
for d.processNextWorkItem(ctx) {
|
||||
}
|
||||
}
|
||||
|
||||
func (d *descheduler) processNextWorkItem(ctx context.Context) bool {
|
||||
dsKey, quit := d.queue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer d.queue.Done(dsKey)
|
||||
|
||||
err := d.sync()
|
||||
if err == nil {
|
||||
d.queue.Forget(dsKey)
|
||||
return true
|
||||
}
|
||||
|
||||
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
|
||||
d.queue.AddRateLimited(dsKey)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *descheduler) sync() error {
|
||||
prometheusConfig := d.metricsProviders[api.PrometheusMetrics].Prometheus
|
||||
if prometheusConfig == nil || prometheusConfig.AuthToken == nil || prometheusConfig.AuthToken.SecretReference == nil {
|
||||
return fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
|
||||
}
|
||||
ns := prometheusConfig.AuthToken.SecretReference.Namespace
|
||||
name := prometheusConfig.AuthToken.SecretReference.Name
|
||||
secretObj, err := d.namespacedSecretsLister.Get(name)
|
||||
if err != nil {
|
||||
// clear the token if the secret is not found
|
||||
if apierrors.IsNotFound(err) {
|
||||
d.currentPrometheusAuthToken = ""
|
||||
if d.previousPrometheusClientTransport != nil {
|
||||
d.previousPrometheusClientTransport.CloseIdleConnections()
|
||||
}
|
||||
d.previousPrometheusClientTransport = nil
|
||||
d.prometheusClient = nil
|
||||
}
|
||||
return fmt.Errorf("unable to get %v/%v secret", ns, name)
|
||||
}
|
||||
authToken := string(secretObj.Data[prometheusAuthTokenSecretKey])
|
||||
if authToken == "" {
|
||||
return fmt.Errorf("prometheus authentication token secret missing %q data or empty", prometheusAuthTokenSecretKey)
|
||||
}
|
||||
if d.currentPrometheusAuthToken == authToken {
|
||||
return nil
|
||||
}
|
||||
|
||||
klog.V(2).Infof("authentication secret token updated, recreating prometheus client")
|
||||
prometheusClient, transport, err := client.CreatePrometheusClient(prometheusConfig.URL, authToken)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create a prometheus client: %v", err)
|
||||
}
|
||||
d.prometheusClient = prometheusClient
|
||||
if d.previousPrometheusClientTransport != nil {
|
||||
d.previousPrometheusClientTransport.CloseIdleConnections()
|
||||
}
|
||||
d.previousPrometheusClientTransport = transport
|
||||
d.currentPrometheusAuthToken = authToken
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *descheduler) eventHandler() cache.ResourceEventHandler {
|
||||
return cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
|
||||
UpdateFunc: func(old, new interface{}) { d.queue.Add(workQueueKey) },
|
||||
DeleteFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
|
||||
}
|
||||
}
|
||||
|
||||
func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
|
||||
@@ -128,12 +351,13 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
|
||||
defer span.End()
|
||||
defer func(loopStartDuration time.Time) {
|
||||
metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
|
||||
metrics.LoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
|
||||
}(time.Now())
|
||||
|
||||
// if len is still <= 1 error out
|
||||
if len(nodes) <= 1 {
|
||||
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
|
||||
return fmt.Errorf("the cluster size is 0 or 1")
|
||||
klog.InfoS("Skipping descheduling cycle: requires >=2 nodes", "found", len(nodes))
|
||||
return nil // gracefully skip this cycle instead of aborting
|
||||
}
|
||||
|
||||
var client clientset.Interface
|
||||
@@ -146,13 +370,14 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
|
||||
fakeClient := fakeclientset.NewSimpleClientset()
|
||||
// simulate a pod eviction by deleting a pod
|
||||
fakeClient.PrependReactor("create", "pods", d.podEvictionReactionFnc(fakeClient))
|
||||
err := cachedClient(d.rs.Client, fakeClient, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
|
||||
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
|
||||
err := d.ir.CopyTo(fakeClient, fakeSharedInformerFactory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create a new instance of the shared informer factory from the cached client
|
||||
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
// register the pod informer, otherwise it will not start running
|
||||
d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
|
||||
if err != nil {
|
||||
@@ -176,7 +401,7 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
|
||||
|
||||
d.runProfiles(ctx, client, nodes)
|
||||
|
||||
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", d.podEvictor.TotalEvicted())
|
||||
klog.V(1).InfoS("Number of evictions/requests", "totalEvicted", d.podEvictor.TotalEvicted(), "evictionRequests", d.podEvictor.TotalEvictionRequests())
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -191,12 +416,15 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
|
||||
var profileRunners []profileRunner
|
||||
for _, profile := range d.deschedulerPolicy.Profiles {
|
||||
currProfile, err := frameworkprofile.NewProfile(
|
||||
ctx,
|
||||
profile,
|
||||
pluginregistry.PluginRegistry,
|
||||
frameworkprofile.WithClientSet(client),
|
||||
frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
|
||||
frameworkprofile.WithPodEvictor(d.podEvictor),
|
||||
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
|
||||
frameworkprofile.WithMetricsCollector(d.metricsCollector),
|
||||
frameworkprofile.WithPrometheusClient(d.prometheusClient),
|
||||
)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
|
||||
@@ -261,6 +489,14 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if (deschedulerPolicy.MetricsCollector != nil && deschedulerPolicy.MetricsCollector.Enabled) || metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.KubernetesMetrics] != nil {
|
||||
metricsClient, err := client.CreateMetricsClient(clientConnection, "descheduler")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rs.MetricsClient = metricsClient
|
||||
}
|
||||
|
||||
runFn := func() error {
|
||||
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
|
||||
}
|
||||
@@ -336,61 +572,13 @@ func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action cor
|
||||
}
|
||||
}
|
||||
|
||||
func cachedClient(
|
||||
realClient clientset.Interface,
|
||||
fakeClient *fakeclientset.Clientset,
|
||||
podLister listersv1.PodLister,
|
||||
nodeLister listersv1.NodeLister,
|
||||
namespaceLister listersv1.NamespaceLister,
|
||||
priorityClassLister schedulingv1.PriorityClassLister,
|
||||
) error {
|
||||
klog.V(3).Infof("Pulling resources for the cached client from the cluster")
|
||||
pods, err := podLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods: %v", err)
|
||||
}
|
||||
type tokenReconciliation int
|
||||
|
||||
for _, item := range pods {
|
||||
if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
|
||||
return fmt.Errorf("unable to copy pod: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
nodes, err := nodeLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list nodes: %v", err)
|
||||
}
|
||||
|
||||
for _, item := range nodes {
|
||||
if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
|
||||
return fmt.Errorf("unable to copy node: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
namespaces, err := namespaceLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list namespaces: %v", err)
|
||||
}
|
||||
|
||||
for _, item := range namespaces {
|
||||
if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
|
||||
return fmt.Errorf("unable to copy namespace: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
priorityClasses, err := priorityClassLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list priorityclasses: %v", err)
|
||||
}
|
||||
|
||||
for _, item := range priorityClasses {
|
||||
if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
|
||||
return fmt.Errorf("unable to copy priorityclass: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
const (
|
||||
noReconciliation tokenReconciliation = iota
|
||||
inClusterReconciliation
|
||||
secretReconciliation
|
||||
)
|
||||
|
||||
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
|
||||
var span trace.Span
|
||||
@@ -413,7 +601,22 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
|
||||
defer eventBroadcaster.Shutdown()
|
||||
|
||||
descheduler, err := newDescheduler(rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
|
||||
var namespacedSharedInformerFactory informers.SharedInformerFactory
|
||||
metricProviderTokenReconciliation := noReconciliation
|
||||
|
||||
prometheusProvider := metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.PrometheusMetrics]
|
||||
if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.URL != "" {
|
||||
if prometheusProvider.Prometheus.AuthToken != nil {
|
||||
// Will get reconciled
|
||||
namespacedSharedInformerFactory = informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields), informers.WithNamespace(prometheusProvider.Prometheus.AuthToken.SecretReference.Namespace))
|
||||
metricProviderTokenReconciliation = secretReconciliation
|
||||
} else {
|
||||
// Use the sa token and assume it has sufficient permissions to authenticate
|
||||
metricProviderTokenReconciliation = inClusterReconciliation
|
||||
}
|
||||
}
|
||||
|
||||
descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory, namespacedSharedInformerFactory)
|
||||
if err != nil {
|
||||
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
return err
|
||||
@@ -422,13 +625,48 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
defer cancel()
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
if metricProviderTokenReconciliation == secretReconciliation {
|
||||
namespacedSharedInformerFactory.Start(ctx.Done())
|
||||
}
|
||||
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
descheduler.podEvictor.WaitForEventHandlersSync(ctx)
|
||||
if metricProviderTokenReconciliation == secretReconciliation {
|
||||
namespacedSharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
}
|
||||
|
||||
if descheduler.metricsCollector != nil {
|
||||
go func() {
|
||||
klog.V(2).Infof("Starting metrics collector")
|
||||
descheduler.metricsCollector.Run(ctx)
|
||||
klog.V(2).Infof("Stopped metrics collector")
|
||||
}()
|
||||
klog.V(2).Infof("Waiting for metrics collector to sync")
|
||||
if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(context.Context) (done bool, err error) {
|
||||
return descheduler.metricsCollector.HasSynced(), nil
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to wait for metrics collector to sync: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if metricProviderTokenReconciliation == secretReconciliation {
|
||||
go descheduler.runAuthenticationSecretReconciler(ctx)
|
||||
}
|
||||
|
||||
wait.NonSlidingUntil(func() {
|
||||
if metricProviderTokenReconciliation == inClusterReconciliation {
|
||||
// Read the sa token and assume it has sufficient permissions to authenticate
|
||||
if err := descheduler.reconcileInClusterSAToken(); err != nil {
|
||||
klog.ErrorS(err, "unable to reconcile an in cluster SA token")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// A next context is created here intentionally to avoid nesting the spans via context.
|
||||
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
|
||||
defer sSpan.End()
|
||||
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.nodeLister, nodeSelector)
|
||||
|
||||
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.sharedInformerFactory.Core().V1().Nodes().Lister(), nodeSelector)
|
||||
if err != nil {
|
||||
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
klog.Error(err)
|
||||
|
||||
@@ -2,25 +2,38 @@ package descheduler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
apiversion "k8s.io/apimachinery/pkg/version"
|
||||
fakediscovery "k8s.io/client-go/discovery/fake"
|
||||
"k8s.io/client-go/informers"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/features"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
@@ -28,11 +41,34 @@ import (
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
var (
|
||||
podEvictionError = errors.New("PodEvictionError")
|
||||
tooManyRequestsError = &apierrors.StatusError{
|
||||
ErrStatus: metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Code: http.StatusTooManyRequests,
|
||||
Reason: metav1.StatusReasonTooManyRequests,
|
||||
Message: "admission webhook \"virt-launcher-eviction-interceptor.kubevirt.io\" denied the request: Eviction triggered evacuation of VMI",
|
||||
},
|
||||
}
|
||||
nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
|
||||
podsgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
|
||||
)
|
||||
|
||||
func initFeatureGates() featuregate.FeatureGate {
|
||||
featureGates := featuregate.NewFeatureGate()
|
||||
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
features.EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
|
||||
})
|
||||
return featureGates
|
||||
}
|
||||
|
||||
func initPluginRegistry() {
|
||||
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
|
||||
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
|
||||
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
|
||||
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
|
||||
pluginregistry.Register(nodeutilization.LowNodeUtilizationPluginName, nodeutilization.NewLowNodeUtilization, &nodeutilization.LowNodeUtilization{}, &nodeutilization.LowNodeUtilizationArgs{}, nodeutilization.ValidateLowNodeUtilizationArgs, nodeutilization.SetDefaults_LowNodeUtilizationArgs, pluginregistry.PluginRegistry)
|
||||
}
|
||||
|
||||
func removePodsViolatingNodeTaintsPolicy() *api.DeschedulerPolicy {
|
||||
@@ -99,7 +135,49 @@ func removeDuplicatesPolicy() *api.DeschedulerPolicy {
|
||||
}
|
||||
}
|
||||
|
||||
func initDescheduler(t *testing.T, ctx context.Context, internalDeschedulerPolicy *api.DeschedulerPolicy, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
|
||||
func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThresholds, metricsEnabled bool) *api.DeschedulerPolicy {
|
||||
var metricsSource api.MetricsSource = ""
|
||||
if metricsEnabled {
|
||||
metricsSource = api.KubernetesMetrics
|
||||
}
|
||||
return &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "Profile",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||
Args: &nodeutilization.LowNodeUtilizationArgs{
|
||||
Thresholds: thresholds,
|
||||
TargetThresholds: targetThresholds,
|
||||
MetricsUtilization: &nodeutilization.MetricsUtilization{
|
||||
Source: metricsSource,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{
|
||||
defaultevictor.PluginName,
|
||||
},
|
||||
},
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{
|
||||
nodeutilization.LowNodeUtilizationPluginName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
|
||||
client := fakeclientset.NewSimpleClientset(objects...)
|
||||
eventClient := fakeclientset.NewSimpleClientset(objects...)
|
||||
|
||||
@@ -109,11 +187,13 @@ func initDescheduler(t *testing.T, ctx context.Context, internalDeschedulerPolic
|
||||
}
|
||||
rs.Client = client
|
||||
rs.EventClient = eventClient
|
||||
rs.DefaultFeatureGates = featureGates
|
||||
rs.MetricsClient = metricsClient
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
|
||||
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
|
||||
|
||||
descheduler, err := newDescheduler(rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory)
|
||||
descheduler, err := newDescheduler(ctx, rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory, nil)
|
||||
if err != nil {
|
||||
eventBroadcaster.Shutdown()
|
||||
t.Fatalf("Unable to create a descheduler instance: %v", err)
|
||||
@@ -144,6 +224,7 @@ func TestTaintsUpdated(t *testing.T) {
|
||||
}
|
||||
rs.Client = client
|
||||
rs.EventClient = eventClient
|
||||
rs.DefaultFeatureGates = initFeatureGates()
|
||||
|
||||
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
@@ -167,7 +248,7 @@ func TestTaintsUpdated(t *testing.T) {
|
||||
}
|
||||
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
|
||||
|
||||
if err := RunDeschedulerStrategies(ctx, rs, removePodsViolatingNodeTaintsPolicy(), "v1"); err != nil {
|
||||
t.Fatalf("Unable to run descheduler strategies: %v", err)
|
||||
@@ -206,6 +287,7 @@ func TestDuplicate(t *testing.T) {
|
||||
}
|
||||
rs.Client = client
|
||||
rs.EventClient = eventClient
|
||||
rs.DefaultFeatureGates = initFeatureGates()
|
||||
|
||||
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
@@ -217,7 +299,7 @@ func TestDuplicate(t *testing.T) {
|
||||
}
|
||||
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
|
||||
|
||||
if err := RunDeschedulerStrategies(ctx, rs, removeDuplicatesPolicy(), "v1"); err != nil {
|
||||
t.Fatalf("Unable to run descheduler strategies: %v", err)
|
||||
@@ -245,6 +327,7 @@ func TestRootCancel(t *testing.T) {
|
||||
rs.Client = client
|
||||
rs.EventClient = eventClient
|
||||
rs.DeschedulingInterval = 100 * time.Millisecond
|
||||
rs.DefaultFeatureGates = initFeatureGates()
|
||||
errChan := make(chan error, 1)
|
||||
defer close(errChan)
|
||||
|
||||
@@ -280,6 +363,7 @@ func TestRootCancelWithNoInterval(t *testing.T) {
|
||||
rs.Client = client
|
||||
rs.EventClient = eventClient
|
||||
rs.DeschedulingInterval = 0
|
||||
rs.DefaultFeatureGates = initFeatureGates()
|
||||
errChan := make(chan error, 1)
|
||||
defer close(errChan)
|
||||
|
||||
@@ -358,7 +442,7 @@ func TestValidateVersionCompatibility(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func podEvictionReactionTestingFnc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
|
||||
func podEvictionReactionTestingFnc(evictedPods *[]string, isEvictionsInBackground func(podName string) bool, evictionErr error) func(action core.Action) (bool, runtime.Object, error) {
|
||||
return func(action core.Action) (bool, runtime.Object, error) {
|
||||
if action.GetSubresource() == "eviction" {
|
||||
createAct, matched := action.(core.CreateActionImpl)
|
||||
@@ -366,7 +450,14 @@ func podEvictionReactionTestingFnc(evictedPods *[]string) func(action core.Actio
|
||||
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
|
||||
}
|
||||
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
|
||||
if isEvictionsInBackground != nil && isEvictionsInBackground(eviction.GetName()) {
|
||||
return true, nil, tooManyRequestsError
|
||||
}
|
||||
if evictionErr != nil {
|
||||
return true, nil, evictionErr
|
||||
}
|
||||
*evictedPods = append(*evictedPods, eviction.GetName())
|
||||
return true, nil, nil
|
||||
}
|
||||
}
|
||||
return false, nil, nil // fallback to the default reactor
|
||||
@@ -402,15 +493,15 @@ func TestPodEvictorReset(t *testing.T) {
|
||||
|
||||
internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
|
||||
ctxCancel, cancel := context.WithCancel(ctx)
|
||||
rs, descheduler, client := initDescheduler(t, ctxCancel, internalDeschedulerPolicy, node1, node2, p1, p2)
|
||||
rs, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, node1, node2, p1, p2)
|
||||
defer cancel()
|
||||
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
|
||||
|
||||
var fakeEvictedPods []string
|
||||
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
|
||||
return podEvictionReactionTestingFnc(&fakeEvictedPods)
|
||||
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
|
||||
}
|
||||
|
||||
// a single pod eviction expected
|
||||
@@ -453,6 +544,138 @@ func TestPodEvictorReset(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func checkTotals(t *testing.T, ctx context.Context, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
|
||||
if total := descheduler.podEvictor.TotalEvictionRequests(); total != totalEvictionRequests {
|
||||
t.Fatalf("Expected %v total eviction requests, got %v instead", totalEvictionRequests, total)
|
||||
}
|
||||
if total := descheduler.podEvictor.TotalEvicted(); total != totalEvicted {
|
||||
t.Fatalf("Expected %v total evictions, got %v instead", totalEvicted, total)
|
||||
}
|
||||
t.Logf("Total evictions: %v, total eviction requests: %v, total evictions and eviction requests: %v", totalEvicted, totalEvictionRequests, totalEvicted+totalEvictionRequests)
|
||||
}
|
||||
|
||||
func runDeschedulingCycleAndCheckTotals(t *testing.T, ctx context.Context, nodes []*v1.Node, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
|
||||
err := descheduler.runDeschedulerLoop(ctx, nodes)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to run a descheduling loop: %v", err)
|
||||
}
|
||||
checkTotals(t, ctx, descheduler, totalEvictionRequests, totalEvicted)
|
||||
}
|
||||
|
||||
func TestEvictionRequestsCache(t *testing.T) {
|
||||
initPluginRegistry()
|
||||
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
nodes := []*v1.Node{node1, node2}
|
||||
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
updatePod := func(pod *v1.Pod) {
|
||||
pod.Namespace = "dev"
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
pod.Status.Phase = v1.PodRunning
|
||||
}
|
||||
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
|
||||
updatePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
evictions.EvictionRequestAnnotationKey: "",
|
||||
}
|
||||
}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, updatePod)
|
||||
|
||||
internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
|
||||
ctxCancel, cancel := context.WithCancel(ctx)
|
||||
featureGates := featuregate.NewFeatureGate()
|
||||
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
|
||||
})
|
||||
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, node1, node2, p1, p2, p3, p4)
|
||||
defer cancel()
|
||||
|
||||
var fakeEvictedPods []string
|
||||
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
|
||||
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
|
||||
}
|
||||
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))
|
||||
|
||||
klog.Infof("2 evictions in background expected, 2 normal evictions")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
|
||||
// No evicted pod is actually deleted on purpose so the test can run the descheduling cycle repeatedly
|
||||
// without recreating the pods.
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background got initiated")
|
||||
p2.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
|
||||
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to update a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Scenario: Another eviction in background got initiated")
|
||||
p1.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
|
||||
if _, err := client.CoreV1().Pods(p1.Namespace).Update(context.TODO(), p1, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to update a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background completed")
|
||||
if err := client.CoreV1().Pods(p1.Namespace).Delete(context.TODO(), p1.Name, metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("unable to delete a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions in background decreased")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 2)
|
||||
|
||||
klog.Infof("Scenario: A new pod without eviction in background added")
|
||||
if _, err := client.CoreV1().Pods(p5.Namespace).Create(context.TODO(), p5, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("unable to create a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions increased after running a descheduling cycle")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background canceled => eviction in progress annotation removed")
|
||||
delete(p2.Annotations, evictions.EvictionInProgressAnnotationKey)
|
||||
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to update a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions in background decreased")
|
||||
checkTotals(t, ctx, descheduler, 0, 3)
|
||||
|
||||
klog.Infof("Scenario: Re-run the descheduling cycle to re-request eviction in background")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background completed with a pod in completed state")
|
||||
p2.Status.Phase = v1.PodSucceeded
|
||||
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to delete a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions in background decreased")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 0, 3)
|
||||
}
|
||||
|
||||
func TestDeschedulingLimits(t *testing.T) {
|
||||
initPluginRegistry()
|
||||
|
||||
@@ -496,6 +719,13 @@ func TestDeschedulingLimits(t *testing.T) {
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
}
|
||||
|
||||
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
|
||||
updatePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
evictions.EvictionRequestAnnotationKey: "",
|
||||
}
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
@@ -503,39 +733,137 @@ func TestDeschedulingLimits(t *testing.T) {
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
nodes := []*v1.Node{node1, node2}
|
||||
ctxCancel, cancel := context.WithCancel(ctx)
|
||||
_, descheduler, client := initDescheduler(t, ctxCancel, tc.policy, node1, node2)
|
||||
featureGates := featuregate.NewFeatureGate()
|
||||
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
|
||||
})
|
||||
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, node1, node2)
|
||||
defer cancel()
|
||||
|
||||
var fakeEvictedPods []string
|
||||
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
|
||||
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
|
||||
}
|
||||
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
pods := []*v1.Pod{
|
||||
test.BuildTestPod("p1", 100, 0, node1.Name, updatePod),
|
||||
test.BuildTestPod("p2", 100, 0, node1.Name, updatePod),
|
||||
test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground),
|
||||
test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground),
|
||||
test.BuildTestPod("p3", 100, 0, node1.Name, updatePod),
|
||||
test.BuildTestPod("p4", 100, 0, node1.Name, updatePod),
|
||||
test.BuildTestPod("p5", 100, 0, node1.Name, updatePod),
|
||||
}
|
||||
|
||||
for j := 0; j < 5; j++ {
|
||||
idx := j
|
||||
if _, err := client.CoreV1().Pods(pods[idx].Namespace).Create(context.TODO(), pods[idx], metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("unable to create a pod: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := client.CoreV1().Pods(pods[idx].Namespace).Delete(context.TODO(), pods[idx].Name, metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("unable to delete a pod: %v", err)
|
||||
for i := 0; i < 10; i++ {
|
||||
rand.Shuffle(len(pods), func(i, j int) { pods[i], pods[j] = pods[j], pods[i] })
|
||||
func() {
|
||||
for j := 0; j < 5; j++ {
|
||||
idx := j
|
||||
if _, err := client.CoreV1().Pods(pods[idx].Namespace).Create(context.TODO(), pods[idx], metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("unable to create a pod: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := client.CoreV1().Pods(pods[idx].Namespace).Delete(context.TODO(), pods[idx].Name, metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("unable to delete a pod: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("2 evictions in background expected, 2 normal evictions")
|
||||
err := descheduler.runDeschedulerLoop(ctx, nodes)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to run a descheduling loop: %v", err)
|
||||
}
|
||||
totalERs := descheduler.podEvictor.TotalEvictionRequests()
|
||||
totalEs := descheduler.podEvictor.TotalEvicted()
|
||||
if totalERs+totalEs > tc.limit {
|
||||
t.Fatalf("Expected %v evictions and eviction requests in total, got %v instead", tc.limit, totalERs+totalEs)
|
||||
}
|
||||
t.Logf("Total evictions and eviction requests: %v (er=%v, e=%v)", totalERs+totalEs, totalERs, totalEs)
|
||||
}()
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
err := descheduler.runDeschedulerLoop(ctx, nodes)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to run a descheduling loop: %v", err)
|
||||
}
|
||||
totalEs := descheduler.podEvictor.TotalEvicted()
|
||||
if totalEs > tc.limit {
|
||||
t.Fatalf("Expected %v evictions in total, got %v instead", tc.limit, totalEs)
|
||||
}
|
||||
t.Logf("Total evictions: %v", totalEs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadAwareDescheduling(t *testing.T) {
|
||||
initPluginRegistry()
|
||||
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
updatePod := func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
nodes := []*v1.Node{node1, node2}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 300, 0, node1.Name, updatePod)
|
||||
p2 := test.BuildTestPod("p2", 300, 0, node1.Name, updatePod)
|
||||
p3 := test.BuildTestPod("p3", 300, 0, node1.Name, updatePod)
|
||||
p4 := test.BuildTestPod("p4", 300, 0, node1.Name, updatePod)
|
||||
p5 := test.BuildTestPod("p5", 300, 0, node1.Name, updatePod)
|
||||
|
||||
nodemetricses := []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics("n1", 2400, 3000),
|
||||
test.BuildNodeMetrics("n2", 400, 0),
|
||||
}
|
||||
|
||||
podmetricses := []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 400, 0),
|
||||
test.BuildPodMetrics("p2", 400, 0),
|
||||
test.BuildPodMetrics("p3", 400, 0),
|
||||
test.BuildPodMetrics("p4", 400, 0),
|
||||
test.BuildPodMetrics("p5", 400, 0),
|
||||
}
|
||||
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
for _, nodemetrics := range nodemetricses {
|
||||
metricsClientset.Tracker().Create(nodesgvr, nodemetrics, "")
|
||||
}
|
||||
for _, podmetrics := range podmetricses {
|
||||
metricsClientset.Tracker().Create(podsgvr, podmetrics, podmetrics.Namespace)
|
||||
}
|
||||
|
||||
policy := lowNodeUtilizationPolicy(
|
||||
api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
true, // enabled metrics utilization
|
||||
)
|
||||
policy.MetricsProviders = []api.MetricsProvider{{Source: api.KubernetesMetrics}}
|
||||
|
||||
ctxCancel, cancel := context.WithCancel(ctx)
|
||||
_, descheduler, _ := initDescheduler(
|
||||
t,
|
||||
ctxCancel,
|
||||
initFeatureGates(),
|
||||
policy,
|
||||
metricsClientset,
|
||||
node1, node2, p1, p2, p3, p4, p5)
|
||||
defer cancel()
|
||||
|
||||
// This needs to be run since the metrics collector is started
|
||||
// after newDescheduler in RunDeschedulerStrategies.
|
||||
descheduler.metricsCollector.Collect(ctx)
|
||||
|
||||
err := descheduler.runDeschedulerLoop(ctx, nodes)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to run a descheduling loop: %v", err)
|
||||
}
|
||||
totalEs := descheduler.podEvictor.TotalEvicted()
|
||||
if totalEs != 2 {
|
||||
t.Fatalf("Expected %v evictions in total, got %v instead", 2, totalEs)
|
||||
}
|
||||
t.Logf("Total evictions: %v", totalEs)
|
||||
}
|
||||
|
||||
@@ -19,7 +19,9 @@ package evictions
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
@@ -27,15 +29,182 @@ import (
|
||||
policy "k8s.io/api/policy/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/features"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
)
|
||||
|
||||
const (
|
||||
deschedulerGlobalName = "sigs.k8s.io/descheduler"
|
||||
reasonAnnotationKey = "reason"
|
||||
requestedByAnnotationKey = "requested-by"
|
||||
)
|
||||
|
||||
var (
|
||||
assumedEvictionRequestTimeoutSeconds uint = 10 * 60 // 10 minutes
|
||||
evictionRequestsCacheResyncPeriod time.Duration = 10 * time.Minute
|
||||
// syncedPollPeriod controls how often you look at the status of your sync funcs
|
||||
syncedPollPeriod = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
type evictionRequestItem struct {
|
||||
podName, podNamespace, podNodeName string
|
||||
evictionAssumed bool
|
||||
assumedTimestamp metav1.Time
|
||||
}
|
||||
|
||||
type evictionRequestsCache struct {
|
||||
mu sync.RWMutex
|
||||
requests map[string]evictionRequestItem
|
||||
requestsPerNode map[string]uint
|
||||
requestsPerNamespace map[string]uint
|
||||
requestsTotal uint
|
||||
assumedRequestTimeoutSeconds uint
|
||||
}
|
||||
|
||||
func newEvictionRequestsCache(assumedRequestTimeoutSeconds uint) *evictionRequestsCache {
|
||||
return &evictionRequestsCache{
|
||||
requests: make(map[string]evictionRequestItem),
|
||||
requestsPerNode: make(map[string]uint),
|
||||
requestsPerNamespace: make(map[string]uint),
|
||||
assumedRequestTimeoutSeconds: assumedRequestTimeoutSeconds,
|
||||
}
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) run(ctx context.Context) {
|
||||
wait.UntilWithContext(ctx, erc.cleanCache, evictionRequestsCacheResyncPeriod)
|
||||
}
|
||||
|
||||
// cleanCache removes all assumed entries that have not been confirmed
|
||||
// for more than a specified timeout
|
||||
func (erc *evictionRequestsCache) cleanCache(ctx context.Context) {
|
||||
erc.mu.Lock()
|
||||
defer erc.mu.Unlock()
|
||||
klog.V(4).Infof("Cleaning cache of assumed eviction requests in background")
|
||||
for uid, item := range erc.requests {
|
||||
if item.evictionAssumed {
|
||||
requestAgeSeconds := uint(metav1.Now().Sub(item.assumedTimestamp.Local()).Seconds())
|
||||
if requestAgeSeconds > erc.assumedRequestTimeoutSeconds {
|
||||
klog.V(4).InfoS("Assumed eviction request in background timed out, deleting", "timeout", erc.assumedRequestTimeoutSeconds, "podNamespace", item.podNamespace, "podName", item.podName)
|
||||
erc.deleteItem(uid)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) evictionRequestsPerNode(nodeName string) uint {
|
||||
erc.mu.RLock()
|
||||
defer erc.mu.RUnlock()
|
||||
return erc.requestsPerNode[nodeName]
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) evictionRequestsPerNamespace(ns string) uint {
|
||||
erc.mu.RLock()
|
||||
defer erc.mu.RUnlock()
|
||||
return erc.requestsPerNamespace[ns]
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) evictionRequestsTotal() uint {
|
||||
erc.mu.RLock()
|
||||
defer erc.mu.RUnlock()
|
||||
return erc.requestsTotal
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) TotalEvictionRequests() uint {
|
||||
erc.mu.RLock()
|
||||
defer erc.mu.RUnlock()
|
||||
return uint(len(erc.requests))
|
||||
}
|
||||
|
||||
// getPodKey returns the string key of a pod.
|
||||
func getPodKey(pod *v1.Pod) string {
|
||||
uid := string(pod.UID)
|
||||
// Every pod is expected to have the UID set.
|
||||
// When the descheduling framework is used for simulation
|
||||
// user created workload may forget to set the UID.
|
||||
if len(uid) == 0 {
|
||||
panic(fmt.Errorf("cannot get cache key for %v/%v pod with empty UID", pod.Namespace, pod.Name))
|
||||
}
|
||||
return uid
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) addPod(pod *v1.Pod) {
|
||||
erc.mu.Lock()
|
||||
defer erc.mu.Unlock()
|
||||
uid := getPodKey(pod)
|
||||
if _, exists := erc.requests[uid]; exists {
|
||||
return
|
||||
}
|
||||
erc.requests[uid] = evictionRequestItem{podNamespace: pod.Namespace, podName: pod.Name, podNodeName: pod.Spec.NodeName}
|
||||
erc.requestsPerNode[pod.Spec.NodeName]++
|
||||
erc.requestsPerNamespace[pod.Namespace]++
|
||||
erc.requestsTotal++
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) assumePod(pod *v1.Pod) {
|
||||
erc.mu.Lock()
|
||||
defer erc.mu.Unlock()
|
||||
uid := getPodKey(pod)
|
||||
if _, exists := erc.requests[uid]; exists {
|
||||
return
|
||||
}
|
||||
erc.requests[uid] = evictionRequestItem{
|
||||
podNamespace: pod.Namespace,
|
||||
podName: pod.Name,
|
||||
podNodeName: pod.Spec.NodeName,
|
||||
evictionAssumed: true,
|
||||
assumedTimestamp: metav1.NewTime(time.Now()),
|
||||
}
|
||||
erc.requestsPerNode[pod.Spec.NodeName]++
|
||||
erc.requestsPerNamespace[pod.Namespace]++
|
||||
erc.requestsTotal++
|
||||
}
|
||||
|
||||
// no locking, expected to be invoked from protected methods only
|
||||
func (erc *evictionRequestsCache) deleteItem(uid string) {
|
||||
erc.requestsPerNode[erc.requests[uid].podNodeName]--
|
||||
if erc.requestsPerNode[erc.requests[uid].podNodeName] == 0 {
|
||||
delete(erc.requestsPerNode, erc.requests[uid].podNodeName)
|
||||
}
|
||||
erc.requestsPerNamespace[erc.requests[uid].podNamespace]--
|
||||
if erc.requestsPerNamespace[erc.requests[uid].podNamespace] == 0 {
|
||||
delete(erc.requestsPerNamespace, erc.requests[uid].podNamespace)
|
||||
}
|
||||
erc.requestsTotal--
|
||||
delete(erc.requests, uid)
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) deletePod(pod *v1.Pod) {
|
||||
erc.mu.Lock()
|
||||
defer erc.mu.Unlock()
|
||||
uid := getPodKey(pod)
|
||||
if _, exists := erc.requests[uid]; exists {
|
||||
erc.deleteItem(uid)
|
||||
}
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) hasPod(pod *v1.Pod) bool {
|
||||
erc.mu.RLock()
|
||||
defer erc.mu.RUnlock()
|
||||
uid := getPodKey(pod)
|
||||
_, exists := erc.requests[uid]
|
||||
return exists
|
||||
}
|
||||
|
||||
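Taken together, the cache above is keyed by pod UID and keeps per-node, per-namespace, and total tallies of outstanding eviction requests. A rough usage fragment for orientation (pod and otherPod are placeholder *v1.Pod values; the identifiers come from the code above):

```go
erc := newEvictionRequestsCache(assumedEvictionRequestTimeoutSeconds)

erc.addPod(pod)         // register a pod whose eviction request is known to be in progress
erc.assumePod(otherPod) // optimistic entry, removed by cleanCache after the timeout

perNode := erc.evictionRequestsPerNode(pod.Spec.NodeName) // feeds the per-node eviction limits
total := erc.TotalEvictionRequests()

erc.deletePod(pod) // the pod went away or its eviction-in-progress annotation was cleared
_, _ = perNode, total
```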
var (
|
||||
EvictionRequestAnnotationKey = "descheduler.alpha.kubernetes.io/request-evict-only"
|
||||
EvictionInProgressAnnotationKey = "descheduler.alpha.kubernetes.io/eviction-in-progress"
|
||||
EvictionInBackgroundErrorText = "Eviction triggered evacuation"
|
||||
)
|
||||
|
||||
// nodePodEvictedCount keeps count of pods evicted on node
|
||||
type (
|
||||
nodePodEvictedCount map[string]uint
|
||||
@@ -43,54 +212,186 @@ type (
|
||||
)
|
||||
|
||||
type PodEvictor struct {
|
||||
mu sync.Mutex
|
||||
client clientset.Interface
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
nodePodCount nodePodEvictedCount
|
||||
namespacePodCount namespacePodEvictCount
|
||||
totalPodCount uint
|
||||
metricsEnabled bool
|
||||
eventRecorder events.EventRecorder
|
||||
mu sync.RWMutex
|
||||
client clientset.Interface
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
evictionFailureEventNotification bool
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
gracePeriodSeconds *int64
|
||||
nodePodCount nodePodEvictedCount
|
||||
namespacePodCount namespacePodEvictCount
|
||||
totalPodCount uint
|
||||
metricsEnabled bool
|
||||
eventRecorder events.EventRecorder
|
||||
erCache *evictionRequestsCache
|
||||
featureGates featuregate.FeatureGate
|
||||
|
||||
// registeredHandlers contains the registrations of all handlers. It's used to check if all handlers have finished syncing before the scheduling cycles start.
|
||||
registeredHandlers []cache.ResourceEventHandlerRegistration
|
||||
}
|
||||
|
||||
func NewPodEvictor(
|
||||
ctx context.Context,
|
||||
client clientset.Interface,
|
||||
eventRecorder events.EventRecorder,
|
||||
podInformer cache.SharedIndexInformer,
|
||||
featureGates featuregate.FeatureGate,
|
||||
options *Options,
|
||||
) *PodEvictor {
|
||||
) (*PodEvictor, error) {
|
||||
if options == nil {
|
||||
options = NewOptions()
|
||||
}
|
||||
|
||||
return &PodEvictor{
|
||||
client: client,
|
||||
eventRecorder: eventRecorder,
|
||||
policyGroupVersion: options.policyGroupVersion,
|
||||
dryRun: options.dryRun,
|
||||
maxPodsToEvictPerNode: options.maxPodsToEvictPerNode,
|
||||
maxPodsToEvictPerNamespace: options.maxPodsToEvictPerNamespace,
|
||||
maxPodsToEvictTotal: options.maxPodsToEvictTotal,
|
||||
metricsEnabled: options.metricsEnabled,
|
||||
nodePodCount: make(nodePodEvictedCount),
|
||||
namespacePodCount: make(namespacePodEvictCount),
|
||||
podEvictor := &PodEvictor{
|
||||
client: client,
|
||||
eventRecorder: eventRecorder,
|
||||
policyGroupVersion: options.policyGroupVersion,
|
||||
dryRun: options.dryRun,
|
||||
evictionFailureEventNotification: options.evictionFailureEventNotification,
|
||||
maxPodsToEvictPerNode: options.maxPodsToEvictPerNode,
|
||||
maxPodsToEvictPerNamespace: options.maxPodsToEvictPerNamespace,
|
||||
maxPodsToEvictTotal: options.maxPodsToEvictTotal,
|
||||
gracePeriodSeconds: options.gracePeriodSeconds,
|
||||
metricsEnabled: options.metricsEnabled,
|
||||
nodePodCount: make(nodePodEvictedCount),
|
||||
namespacePodCount: make(namespacePodEvictCount),
|
||||
featureGates: featureGates,
|
||||
}
|
||||
|
||||
if featureGates.Enabled(features.EvictionsInBackground) {
|
||||
erCache := newEvictionRequestsCache(assumedEvictionRequestTimeoutSeconds)
|
||||
|
||||
handlerRegistration, err := podInformer.AddEventHandler(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", obj)
|
||||
return
|
||||
}
|
||||
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
|
||||
if _, exists := pod.Annotations[EvictionInProgressAnnotationKey]; exists {
|
||||
// Ignore completed/succeeded or failed pods
|
||||
if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
|
||||
klog.V(3).InfoS("Eviction in background detected. Adding pod to the cache.", "pod", klog.KObj(pod))
|
||||
erCache.addPod(pod)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
oldPod, ok := oldObj.(*v1.Pod)
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj)
|
||||
return
|
||||
}
|
||||
newPod, ok := newObj.(*v1.Pod)
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj)
|
||||
return
|
||||
}
|
||||
// Ignore pods that are not subject to an eviction in background
|
||||
if _, exists := newPod.Annotations[EvictionRequestAnnotationKey]; !exists {
|
||||
if erCache.hasPod(newPod) {
|
||||
klog.V(3).InfoS("Pod with eviction in background lost annotation. Removing pod from the cache.", "pod", klog.KObj(newPod))
|
||||
}
|
||||
erCache.deletePod(newPod)
|
||||
return
|
||||
}
|
||||
// Remove completed/succeeded or failed pods from the cache
|
||||
if newPod.Status.Phase == v1.PodSucceeded || newPod.Status.Phase == v1.PodFailed {
|
||||
klog.V(3).InfoS("Pod with eviction in background completed. Removing pod from the cache.", "pod", klog.KObj(newPod))
|
||||
erCache.deletePod(newPod)
|
||||
return
|
||||
}
|
||||
// Ignore any pod that does not have eviction in progress
|
||||
if _, exists := newPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
|
||||
// In case EvictionInProgressAnnotationKey annotation is not present/removed
|
||||
// it's unclear whether the eviction was restarted or terminated.
|
||||
// If the eviction gets restarted the pod needs to be removed from the cache
|
||||
// to allow re-triggering the eviction.
|
||||
if _, exists := oldPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
|
||||
return
|
||||
}
|
||||
// The annotation was removed -> remove the pod from the cache so the eviction
// can be requested again. In case the eviction got restarted, requesting
// the eviction again is expected to be a no-op. In case the eviction
// got terminated with no-retry, requesting a new eviction is a normal
// operation.
|
||||
klog.V(3).InfoS("Eviction in background canceled (annotation removed). Removing pod from the cache.", "annotation", EvictionInProgressAnnotationKey, "pod", klog.KObj(newPod))
|
||||
erCache.deletePod(newPod)
|
||||
return
|
||||
}
|
||||
// Pick up the eviction in progress
|
||||
if !erCache.hasPod(newPod) {
|
||||
klog.V(3).InfoS("Eviction in background detected. Updating the cache.", "pod", klog.KObj(newPod))
|
||||
}
|
||||
erCache.addPod(newPod)
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
var pod *v1.Pod
|
||||
switch t := obj.(type) {
|
||||
case *v1.Pod:
|
||||
pod = t
|
||||
case cache.DeletedFinalStateUnknown:
|
||||
var ok bool
|
||||
pod, ok = t.Obj.(*v1.Pod)
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
|
||||
return
|
||||
}
|
||||
default:
|
||||
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t)
|
||||
return
|
||||
}
|
||||
if erCache.hasPod(pod) {
|
||||
klog.V(3).InfoS("Pod with eviction in background deleted/evicted. Removing pod from the cache.", "pod", klog.KObj(pod))
|
||||
}
|
||||
erCache.deletePod(pod)
|
||||
},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to register event handler for pod evictor: %v", err)
|
||||
}
|
||||
|
||||
podEvictor.registeredHandlers = append(podEvictor.registeredHandlers, handlerRegistration)
|
||||
|
||||
go erCache.run(ctx)
|
||||
|
||||
podEvictor.erCache = erCache
|
||||
}
|
||||
|
||||
return podEvictor, nil
|
||||
}
|
||||
|
||||
// WaitForEventHandlersSync waits for EventHandlers to sync.
// It returns nil once all handlers have synced, or an error if the context is cancelled first.
|
||||
func (pe *PodEvictor) WaitForEventHandlersSync(ctx context.Context) error {
|
||||
return wait.PollUntilContextCancel(ctx, syncedPollPeriod, true, func(ctx context.Context) (done bool, err error) {
|
||||
for _, handler := range pe.registeredHandlers {
|
||||
if !handler.HasSynced() {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
}
|
||||
|
||||
// NodeEvicted gives a number of pods evicted for node
|
||||
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
|
||||
pe.mu.Lock()
|
||||
defer pe.mu.Unlock()
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
return pe.nodePodCount[node.Name]
|
||||
}
|
||||
|
||||
// TotalEvicted gives a number of pods evicted through all nodes
|
||||
func (pe *PodEvictor) TotalEvicted() uint {
|
||||
pe.mu.Lock()
|
||||
defer pe.mu.Unlock()
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
return pe.totalPodCount
|
||||
}
|
||||
|
||||
@@ -108,6 +409,46 @@ func (pe *PodEvictor) SetClient(client clientset.Interface) {
|
||||
pe.client = client
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) evictionRequestsTotal() uint {
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.evictionRequestsTotal()
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) evictionRequestsPerNode(node string) uint {
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.evictionRequestsPerNode(node)
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) evictionRequestsPerNamespace(ns string) uint {
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.evictionRequestsPerNamespace(ns)
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) EvictionRequests(node *v1.Node) uint {
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
return pe.evictionRequestsTotal()
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) TotalEvictionRequests() uint {
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.TotalEvictionRequests()
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// EvictOptions provides a handle for passing additional info to EvictPod
|
||||
type EvictOptions struct {
|
||||
// Reason allows for passing details about the specific eviction for logging.
|
||||
@@ -121,55 +462,91 @@ type EvictOptions struct {
|
||||
// EvictPod evicts a pod while exercising eviction limits.
|
||||
// Returns nil when the pod is evicted on the server side.
|
||||
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) error {
|
||||
if len(pod.UID) == 0 {
|
||||
klog.InfoS("Ignoring pod eviction due to missing UID", "pod", pod)
|
||||
return fmt.Errorf("Pod %v is missing UID", klog.KObj(pod))
|
||||
}
|
||||
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
// eviction in background requested
|
||||
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
|
||||
if pe.erCache.hasPod(pod) {
|
||||
klog.V(3).InfoS("Eviction in background already requested (ignoring)", "pod", klog.KObj(pod))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pe.mu.Lock()
|
||||
defer pe.mu.Unlock()
|
||||
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
|
||||
defer span.End()
|
||||
|
||||
if pe.maxPodsToEvictTotal != nil && pe.totalPodCount+1 > *pe.maxPodsToEvictTotal {
|
||||
if pe.maxPodsToEvictTotal != nil && pe.totalPodCount+pe.evictionRequestsTotal()+1 > *pe.maxPodsToEvictTotal {
|
||||
err := NewEvictionTotalLimitError()
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal)
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictTotal)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
if pe.maxPodsToEvictPerNode != nil && pe.nodePodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
|
||||
if pe.maxPodsToEvictPerNode != nil && pe.nodePodCount[pod.Spec.NodeName]+pe.evictionRequestsPerNode(pod.Spec.NodeName)+1 > *pe.maxPodsToEvictPerNode {
|
||||
err := NewEvictionNodeLimitError(pod.Spec.NodeName)
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNode)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
|
||||
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+pe.evictionRequestsPerNamespace(pod.Namespace)+1 > *pe.maxPodsToEvictPerNamespace {
|
||||
err := NewEvictionNamespaceLimitError(pod.Namespace)
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace, "pod", klog.KObj(pod))
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNamespace)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
|
||||
ignore, err := pe.evictPod(ctx, pod, opts)
|
||||
if err != nil {
|
||||
// err is used only for logging purposes
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvictedTotal.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: %v", pod.Spec.NodeName, err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if ignore {
|
||||
return nil
|
||||
}
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
pe.nodePodCount[pod.Spec.NodeName]++
|
||||
}
|
||||
@@ -178,6 +555,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvictedTotal.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
|
||||
if pe.dryRun {
|
||||
@@ -191,32 +569,62 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
reason = "NotSet"
|
||||
}
|
||||
}
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
|
||||
deleteOptions := &metav1.DeleteOptions{}
|
||||
// return (ignore, err)
|
||||
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) (bool, error) {
|
||||
deleteOptions := &metav1.DeleteOptions{
|
||||
GracePeriodSeconds: pe.gracePeriodSeconds,
|
||||
}
|
||||
// GracePeriodSeconds ?
|
||||
eviction := &policy.Eviction{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: policyGroupVersion,
|
||||
APIVersion: pe.policyGroupVersion,
|
||||
Kind: eutils.EvictionKind,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
Annotations: map[string]string{
|
||||
"reason": fmt.Sprintf("triggered by %v/%v: %v", opts.ProfileName, opts.StrategyName, opts.Reason),
|
||||
"requested-by": deschedulerGlobalName,
|
||||
},
|
||||
},
|
||||
DeleteOptions: deleteOptions,
|
||||
}
|
||||
err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
|
||||
err := pe.client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
// eviction in background requested
|
||||
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
|
||||
// Simulating https://github.com/kubevirt/kubevirt/pull/11532/files#diff-059cc1fc09e8b469143348cc3aa80b40de987670e008fa18a6fe010061f973c9R77
|
||||
if apierrors.IsTooManyRequests(err) && strings.Contains(err.Error(), EvictionInBackgroundErrorText) {
|
||||
// Ignore eviction of any pod that's failed or completed.
// It can happen that an eviction in background ends up with the pod stuck in the completed state.
// Normally, any eviction request is expected to end with the pod's deletion.
// However, some custom eviction policies may leave completed pods around,
// which would otherwise make all the completed pods count as still unfinished evictions in background.
|
||||
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
|
||||
klog.V(3).InfoS("Ignoring eviction of a completed/failed pod", "pod", klog.KObj(pod))
|
||||
return true, nil
|
||||
}
|
||||
klog.V(3).InfoS("Eviction in background assumed", "pod", klog.KObj(pod))
|
||||
pe.erCache.assumePod(pod)
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if apierrors.IsTooManyRequests(err) {
|
||||
return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||
return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||
}
|
||||
if apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
|
||||
return false, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
|
||||
}
|
||||
return err
|
||||
return false, err
|
||||
}
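// Illustrative sketch, not part of the upstream code: the kind of response a
// custom eviction policy (e.g. a KubeVirt-style admission webhook) is expected
// to return so that the descheduler treats the eviction as started in
// background. The reason must be TooManyRequests and the message must contain
// EvictionInBackgroundErrorText, mirroring the fake reactor used in the unit
// tests below.
//
//	return &apierrors.StatusError{
//		ErrStatus: metav1.Status{
//			Reason:  metav1.StatusReasonTooManyRequests,
//			Message: "Eviction triggered evacuation",
//		},
//	}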
|
||||
|
||||
@@ -18,57 +18,108 @@ package evictions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/features"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
const (
|
||||
notFoundText = "pod not found when evicting \"%s\": pods \"%s\" not found"
|
||||
tooManyRequests = "error when evicting pod (ignoring) \"%s\": Too many requests: too many requests"
|
||||
)
|
||||
|
||||
func initFeatureGates() featuregate.FeatureGate {
|
||||
featureGates := featuregate.NewFeatureGate()
|
||||
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
|
||||
})
|
||||
return featureGates
|
||||
}
|
||||
|
||||
func TestEvictPod(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
|
||||
pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
|
||||
tests := []struct {
|
||||
description string
|
||||
node *v1.Node
|
||||
pod *v1.Pod
|
||||
pods []v1.Pod
|
||||
want error
|
||||
evictedPod *v1.Pod
|
||||
pods []runtime.Object
|
||||
wantErr error
|
||||
}{
|
||||
{
|
||||
description: "test pod eviction - pod present",
|
||||
node: node1,
|
||||
pod: pod1,
|
||||
pods: []v1.Pod{*pod1},
|
||||
want: nil,
|
||||
evictedPod: pod1,
|
||||
pods: []runtime.Object{pod1},
|
||||
},
|
||||
{
|
||||
description: "test pod eviction - pod absent",
|
||||
description: "test pod eviction - pod absent (not found error)",
|
||||
node: node1,
|
||||
pod: pod1,
|
||||
pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1", nil), *test.BuildTestPod("p3", 450, 0, "node1", nil)},
|
||||
want: nil,
|
||||
evictedPod: pod1,
|
||||
pods: []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
|
||||
wantErr: fmt.Errorf(notFoundText, pod1.Name, pod1.Name),
|
||||
},
|
||||
{
|
||||
description: "test pod eviction - pod absent (too many requests error)",
|
||||
node: node1,
|
||||
evictedPod: pod1,
|
||||
pods: []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
|
||||
wantErr: fmt.Errorf(tooManyRequests, pod1.Name),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: test.pods}, nil
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeClient := fake.NewClientset(test.pods...)
|
||||
fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, nil, test.wantErr
|
||||
})
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
podEvictor, err := NewPodEvictor(
|
||||
ctx,
|
||||
fakeClient,
|
||||
eventRecorder,
|
||||
sharedInformerFactory.Core().V1().Pods().Informer(),
|
||||
initFeatureGates(),
|
||||
NewOptions(),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
|
||||
}
|
||||
|
||||
_, got := podEvictor.evictPod(ctx, test.evictedPod, EvictOptions{})
|
||||
if got != test.wantErr {
|
||||
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.evictedPod.Name, test.wantErr, got)
|
||||
}
|
||||
})
|
||||
got := evictPod(ctx, fakeClient, test.pod, "v1")
|
||||
if got != test.want {
|
||||
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,50 +169,321 @@ func TestPodTypes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewPodEvictor(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
pod1 := test.BuildTestPod("pod", 400, 0, "node", nil)
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(pod1)
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
|
||||
podEvictor := NewPodEvictor(
|
||||
fakeClient,
|
||||
eventRecorder,
|
||||
NewOptions().WithMaxPodsToEvictPerNode(utilptr.To[uint](1)),
|
||||
)
|
||||
|
||||
stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}
|
||||
|
||||
// 0 evictions expected
|
||||
if evictions := podEvictor.NodeEvicted(stubNode); evictions != 0 {
|
||||
t.Errorf("Expected 0 node evictions, got %q instead", evictions)
|
||||
type podEvictorTest struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
dryRun bool
|
||||
evictionFailureEventNotification *bool
|
||||
maxPodsToEvictTotal *uint
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
expectedNodeEvictions uint
|
||||
expectedTotalEvictions uint
|
||||
expectedError error
|
||||
// expectedEvent is a slice of strings representing expected events.
|
||||
// Each string in the slice should follow the format: "EventType Reason Message".
|
||||
// - "Warning Failed processing failed"
|
||||
events []string
|
||||
}
|
||||
// 0 evictions expected
|
||||
if evictions := podEvictor.TotalEvicted(); evictions != 0 {
|
||||
t.Errorf("Expected 0 total evictions, got %q instead", evictions)
|
||||
tests := []podEvictorTest{
|
||||
{
|
||||
description: "one eviction expected with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
events: []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on total with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionTotalLimitError(),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (0)"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on node with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNodeLimitError("node"),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (0)"},
|
||||
},
|
||||
{
|
||||
description:                      "eviction limit exceeded on namespace with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](0),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNamespaceLimitError("default"),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (0)"},
|
||||
},
|
||||
{
|
||||
description: "eviction error with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: fmt.Errorf("eviction error"),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: eviction error"},
|
||||
},
|
||||
{
|
||||
description: "eviction with dryRun with eviction failure event notification",
|
||||
pod: pod1,
|
||||
dryRun: true,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
description: "one eviction expected without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
events: []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on total without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionTotalLimitError(),
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on node without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNodeLimitError("node"),
|
||||
},
|
||||
{
|
||||
description:                "eviction limit exceeded on namespace without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](0),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNamespaceLimitError("default"),
|
||||
},
|
||||
{
|
||||
description: "eviction error without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: fmt.Errorf("eviction error"),
|
||||
},
|
||||
{
|
||||
description:                "eviction with dryRun without eviction failure event notification",
|
||||
pod: pod1,
|
||||
dryRun: true,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
fakeClient := fake.NewSimpleClientset(pod1)
|
||||
fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, nil, test.expectedError
|
||||
})
|
||||
|
||||
if err := podEvictor.EvictPod(context.TODO(), pod1, EvictOptions{}); err != nil {
|
||||
t.Errorf("Expected a pod eviction, got an eviction error instead: %v", err)
|
||||
}
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
// 1 node eviction expected
|
||||
if evictions := podEvictor.NodeEvicted(stubNode); evictions != 1 {
|
||||
t.Errorf("Expected 1 node eviction, got %q instead", evictions)
|
||||
}
|
||||
// 1 total eviction expected
|
||||
if evictions := podEvictor.TotalEvicted(); evictions != 1 {
|
||||
t.Errorf("Expected 1 total evictions, got %q instead", evictions)
|
||||
}
|
||||
eventRecorder := events.NewFakeRecorder(100)
|
||||
|
||||
err := podEvictor.EvictPod(context.TODO(), pod1, EvictOptions{})
|
||||
if err == nil {
|
||||
t.Errorf("Expected a pod eviction error, got nil instead")
|
||||
}
|
||||
switch err.(type) {
|
||||
case *EvictionNodeLimitError:
|
||||
// all good
|
||||
default:
|
||||
t.Errorf("Expected a pod eviction EvictionNodeLimitError error, got a different error instead: %v", err)
|
||||
podEvictor, err := NewPodEvictor(
|
||||
ctx,
|
||||
fakeClient,
|
||||
eventRecorder,
|
||||
sharedInformerFactory.Core().V1().Pods().Informer(),
|
||||
initFeatureGates(),
|
||||
NewOptions().
|
||||
WithDryRun(test.dryRun).
|
||||
WithMaxPodsToEvictTotal(test.maxPodsToEvictTotal).
|
||||
WithMaxPodsToEvictPerNode(test.maxPodsToEvictPerNode).
|
||||
WithEvictionFailureEventNotification(test.evictionFailureEventNotification).
|
||||
WithMaxPodsToEvictPerNamespace(test.maxPodsToEvictPerNamespace),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
|
||||
}
|
||||
|
||||
stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}
|
||||
|
||||
if actualErr := podEvictor.EvictPod(ctx, test.pod, EvictOptions{}); actualErr != nil && actualErr.Error() != test.expectedError.Error() {
|
||||
t.Errorf("Expected error: %v, got: %v", test.expectedError, actualErr)
|
||||
}
|
||||
|
||||
if evictions := podEvictor.NodeEvicted(stubNode); evictions != test.expectedNodeEvictions {
|
||||
t.Errorf("Expected %d node evictions, got %d instead", test.expectedNodeEvictions, evictions)
|
||||
}
|
||||
|
||||
if evictions := podEvictor.TotalEvicted(); evictions != test.expectedTotalEvictions {
|
||||
t.Errorf("Expected %d total evictions, got %d instead", test.expectedTotalEvictions, evictions)
|
||||
}
|
||||
|
||||
// Assert that the events are correct.
|
||||
assertEqualEvents(t, test.events, eventRecorder.Events)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvictionRequestsCacheCleanup(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
updatePod := func(pod *v1.Pod) {
|
||||
pod.Namespace = "dev"
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
}
|
||||
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
|
||||
updatePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
EvictionRequestAnnotationKey: "",
|
||||
}
|
||||
}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(node1, p1, p2, p3, p4)
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(client, 0)
|
||||
_, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
|
||||
|
||||
podEvictor, err := NewPodEvictor(
|
||||
ctx,
|
||||
client,
|
||||
eventRecorder,
|
||||
sharedInformerFactory.Core().V1().Pods().Informer(),
|
||||
initFeatureGates(),
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
|
||||
}
|
||||
|
||||
client.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
if action.GetSubresource() == "eviction" {
|
||||
createAct, matched := action.(core.CreateActionImpl)
|
||||
if !matched {
|
||||
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
|
||||
}
|
||||
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
|
||||
podName := eviction.GetName()
|
||||
annotations := eviction.GetAnnotations()
|
||||
if (podName == "p1" || podName == "p2") && annotations[requestedByAnnotationKey] == deschedulerGlobalName && strings.HasPrefix(
|
||||
annotations[reasonAnnotationKey],
|
||||
"triggered by",
|
||||
) {
|
||||
return true, nil, &apierrors.StatusError{
|
||||
ErrStatus: metav1.Status{
|
||||
Reason: metav1.StatusReasonTooManyRequests,
|
||||
Message: "Eviction triggered evacuation",
|
||||
},
|
||||
}
|
||||
}
|
||||
return true, nil, nil
|
||||
}
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
podEvictor.EvictPod(ctx, p1, EvictOptions{})
|
||||
podEvictor.EvictPod(ctx, p2, EvictOptions{})
|
||||
podEvictor.EvictPod(ctx, p3, EvictOptions{})
|
||||
podEvictor.EvictPod(ctx, p4, EvictOptions{})
|
||||
|
||||
klog.Infof("2 evictions in background expected, 2 normal evictions")
|
||||
if total := podEvictor.TotalEvictionRequests(); total != 2 {
|
||||
t.Fatalf("Expected %v total eviction requests, got %v instead", 2, total)
|
||||
}
|
||||
if total := podEvictor.TotalEvicted(); total != 2 {
|
||||
t.Fatalf("Expected %v total evictions, got %v instead", 2, total)
|
||||
}
|
||||
|
||||
klog.Infof("2 evictions in background assumed. Wait for few seconds and check the assumed requests timed out")
|
||||
time.Sleep(2 * time.Second)
|
||||
klog.Infof("Checking the assumed requests timed out and were deleted")
|
||||
// Set the timeout to 1s so the cleaning can be tested
|
||||
podEvictor.erCache.assumedRequestTimeoutSeconds = 1
|
||||
podEvictor.erCache.cleanCache(ctx)
|
||||
if totalERs := podEvictor.TotalEvictionRequests(); totalERs > 0 {
|
||||
t.Fatalf("Expected 0 eviction requests, got %v instead", totalERs)
|
||||
}
|
||||
}
|
||||
|
||||
func assertEqualEvents(t *testing.T, expected []string, actual <-chan string) {
|
||||
t.Logf("Assert for events: %v", expected)
|
||||
c := time.After(wait.ForeverTestTimeout)
|
||||
for _, e := range expected {
|
||||
select {
|
||||
case a := <-actual:
|
||||
if !reflect.DeepEqual(a, e) {
|
||||
t.Errorf("Expected event %q, got %q instead", e, a)
|
||||
}
|
||||
case <-c:
|
||||
t.Errorf("Expected event %q, got nothing", e)
|
||||
// continue iterating to print all expected events
|
||||
}
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case a := <-actual:
|
||||
t.Errorf("Unexpected event: %q", a)
|
||||
default:
|
||||
return // No more events, as expected.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,12 +5,14 @@ import (
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
metricsEnabled bool
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
evictionFailureEventNotification bool
|
||||
metricsEnabled bool
|
||||
gracePeriodSeconds *int64
|
||||
}
|
||||
|
||||
// NewOptions returns an Options with default values.
|
||||
@@ -45,7 +47,19 @@ func (o *Options) WithMaxPodsToEvictTotal(maxPodsToEvictTotal *uint) *Options {
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithGracePeriodSeconds(gracePeriodSeconds *int64) *Options {
|
||||
o.gracePeriodSeconds = gracePeriodSeconds
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithMetricsEnabled(metricsEnabled bool) *Options {
|
||||
o.metricsEnabled = metricsEnabled
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithEvictionFailureEventNotification(evictionFailureEventNotification *bool) *Options {
|
||||
if evictionFailureEventNotification != nil {
|
||||
o.evictionFailureEventNotification = *evictionFailureEventNotification
|
||||
}
|
||||
return o
|
||||
}
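// exampleOptions is an illustrative sketch, not part of the upstream code:
// it shows how the builder methods above can be chained when constructing a
// PodEvictor. The limit values and the grace period are arbitrary examples.
func exampleOptions() *Options {
	maxTotal, maxPerNode, maxPerNamespace := uint(20), uint(5), uint(3)
	gracePeriod := int64(30)
	notifyOnFailure := true
	return NewOptions().
		WithMaxPodsToEvictTotal(&maxTotal).
		WithMaxPodsToEvictPerNode(&maxPerNode).
		WithMaxPodsToEvictPerNamespace(&maxPerNamespace).
		WithGracePeriodSeconds(&gracePeriod).
		WithEvictionFailureEventNotification(&notifyOnFailure).
		WithMetricsEnabled(true)
}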
|
||||
|
||||
@@ -17,12 +17,17 @@ limitations under the License.
|
||||
package utils
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
const (
|
||||
EvictionKind = "Eviction"
|
||||
EvictionSubresource = "pods/eviction"
|
||||
// A new experimental feature for soft no-eviction preference.
|
||||
// Each plugin will decide whether the soft preference will be respected.
|
||||
// If configured the soft preference turns into a mandatory no-eviction policy for the DefaultEvictor plugin.
|
||||
SoftNoEvictionAnnotationKey = "descheduler.alpha.kubernetes.io/prefer-no-eviction"
|
||||
)
|
||||
|
||||
// SupportEviction uses Discovery API to find out if the server support eviction subresource
|
||||
@@ -56,3 +61,9 @@ func SupportEviction(client clientset.Interface) (string, error) {
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// HaveNoEvictionAnnotation checks if the pod has the soft no-eviction annotation
|
||||
func HaveNoEvictionAnnotation(pod *corev1.Pod) bool {
|
||||
_, found := pod.ObjectMeta.Annotations[SoftNoEvictionAnnotationKey]
|
||||
return found
|
||||
}
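// Illustrative sketch, not part of the upstream file: a pod expressing the
// soft no-eviction preference. The empty annotation value is an assumption;
// HaveNoEvictionAnnotation only checks for the presence of the key.
//
//	pod.ObjectMeta.Annotations = map[string]string{
//		SoftNoEvictionAnnotationKey: "",
//	}
//	if HaveNoEvictionAnnotation(pod) {
//		// the DefaultEvictor plugin treats this as a mandatory no-eviction policy
//	}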
|
||||
|
||||
152
pkg/descheduler/metricscollector/metricscollector.go
Normal file
@@ -0,0 +1,152 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metricscollector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
listercorev1 "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/klog/v2"
|
||||
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
const (
|
||||
beta float64 = 0.9
|
||||
)
|
||||
|
||||
type MetricsCollector struct {
|
||||
nodeLister listercorev1.NodeLister
|
||||
metricsClientset metricsclient.Interface
|
||||
nodeSelector labels.Selector
|
||||
|
||||
nodes map[string]api.ReferencedResourceList
|
||||
|
||||
mu sync.RWMutex
|
||||
// hasSynced signals at least one sync succeeded
|
||||
hasSynced bool
|
||||
}
|
||||
|
||||
func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset metricsclient.Interface, nodeSelector labels.Selector) *MetricsCollector {
|
||||
return &MetricsCollector{
|
||||
nodeLister: nodeLister,
|
||||
metricsClientset: metricsClientset,
|
||||
nodeSelector: nodeSelector,
|
||||
nodes: make(map[string]api.ReferencedResourceList),
|
||||
}
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) Run(ctx context.Context) {
|
||||
wait.NonSlidingUntil(func() {
|
||||
mc.Collect(ctx)
|
||||
}, 5*time.Second, ctx.Done())
|
||||
}
|
||||
|
||||
// During experiments, the rounding-to-int error causes weightedAverage to never
// reach the target value even when weightedAverage is repeated many times in a row.
// The difference between the limit and the computed average settles within 5 units.
// Nevertheless, the value is expected to change over time, so the weighted
// average never gets a chance to converge, which makes the computed
// error negligible.
// The speed of convergence depends on how often the metrics collector
// syncs with the current value. Currently, the interval is set to 5s.
|
||||
func weightedAverage(prevValue, value int64) int64 {
|
||||
return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
|
||||
}
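// Worked example (illustrative, with the beta = 0.9 defined above): starting
// from a previous CPU reading of 1400m and new readings of 500m and then 900m,
//
//	weightedAverage(1400, 500) = round(0.9*1400 + 0.1*500) = 1310
//	weightedAverage(1310, 900) = round(0.9*1310 + 0.1*900) = 1269
//
// which matches the 1400 -> 1310 -> 1269 sequence asserted in
// metricscollector_test.go below.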
|
||||
|
||||
func (mc *MetricsCollector) AllNodesUsage() (map[string]api.ReferencedResourceList, error) {
|
||||
mc.mu.RLock()
|
||||
defer mc.mu.RUnlock()
|
||||
|
||||
allNodesUsage := make(map[string]api.ReferencedResourceList)
|
||||
for nodeName := range mc.nodes {
|
||||
allNodesUsage[nodeName] = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
|
||||
}
|
||||
}
|
||||
|
||||
return allNodesUsage, nil
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) NodeUsage(node *v1.Node) (api.ReferencedResourceList, error) {
|
||||
mc.mu.RLock()
|
||||
defer mc.mu.RUnlock()
|
||||
|
||||
if _, exists := mc.nodes[node.Name]; !exists {
|
||||
klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node))
|
||||
return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
|
||||
}
|
||||
return api.ReferencedResourceList{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) HasSynced() bool {
|
||||
return mc.hasSynced
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) MetricsClient() metricsclient.Interface {
|
||||
return mc.metricsClientset
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) Collect(ctx context.Context) error {
|
||||
mc.mu.Lock()
|
||||
defer mc.mu.Unlock()
|
||||
nodes, err := mc.nodeLister.List(mc.nodeSelector)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list nodes: %v", err)
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
metrics, err := mc.metricsClientset.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Error fetching metrics", "node", node.Name)
|
||||
// No entry -> duplicate the previous value -> do nothing as beta*PV + (1-beta)*PV = PV
|
||||
continue
|
||||
}
|
||||
|
||||
if _, exists := mc.nodes[node.Name]; !exists {
|
||||
mc.nodes[node.Name] = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
|
||||
}
|
||||
} else {
|
||||
// get MilliValue to reduce loss of precision
|
||||
mc.nodes[node.Name][v1.ResourceCPU].SetMilli(
|
||||
weightedAverage(mc.nodes[node.Name][v1.ResourceCPU].MilliValue(), metrics.Usage.Cpu().MilliValue()),
|
||||
)
|
||||
mc.nodes[node.Name][v1.ResourceMemory].Set(
|
||||
weightedAverage(mc.nodes[node.Name][v1.ResourceMemory].Value(), metrics.Usage.Memory().Value()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
mc.hasSynced = true
|
||||
return nil
|
||||
}
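// Illustrative sketch, not part of the upstream file: wiring a MetricsCollector
// to a node lister and a metrics clientset and running it until the context is
// cancelled. nodeLister, metricsClient, ctx and node are assumed to be created
// elsewhere (e.g. from a shared informer factory and a metrics clientset).
//
//	collector := NewMetricsCollector(nodeLister, metricsClient, labels.Everything())
//	go collector.Run(ctx)
//	// read usage only after at least one successful sync
//	if collector.HasSynced() {
//		usage, err := collector.NodeUsage(node)
//		...
//	}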
|
||||
142
pkg/descheduler/metricscollector/metricscollector_test.go
Normal file
@@ -0,0 +1,142 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metricscollector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/informers"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func checkCpuNodeUsage(t *testing.T, usage api.ReferencedResourceList, millicpu int64) {
|
||||
t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
|
||||
if usage[v1.ResourceCPU].MilliValue() != millicpu {
|
||||
t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricsCollector(t *testing.T) {
|
||||
gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
|
||||
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
|
||||
|
||||
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
|
||||
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
|
||||
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
metricsClientset.Tracker().Create(gvr, n1metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n2metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n3metrics, "")
|
||||
|
||||
ctx := context.TODO()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
t.Logf("Set initial node cpu usage to 1400")
|
||||
collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ := collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1400)
|
||||
allnodesUsage, _ := collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)
|
||||
|
||||
t.Logf("Set current node cpu usage to 500")
|
||||
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(500, resource.DecimalSI)
|
||||
metricsClientset.Tracker().Update(gvr, n2metrics, "")
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ = collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1310)
|
||||
allnodesUsage, _ = collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1310)
|
||||
|
||||
t.Logf("Set current node cpu usage to 900")
|
||||
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
|
||||
metricsClientset.Tracker().Update(gvr, n2metrics, "")
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ = collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1269)
|
||||
allnodesUsage, _ = collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1269)
|
||||
}
|
||||
|
||||
func TestMetricsCollectorConvergence(t *testing.T) {
|
||||
gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
|
||||
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
|
||||
|
||||
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
|
||||
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
|
||||
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
metricsClientset.Tracker().Create(gvr, n1metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n2metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n3metrics, "")
|
||||
|
||||
ctx := context.TODO()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
t.Logf("Set initial node cpu usage to 1400")
|
||||
collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ := collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1400)
|
||||
allnodesUsage, _ := collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)
|
||||
|
||||
t.Logf("Set current node cpu/memory usage to 900/1614978816 and wait until it converges to it")
|
||||
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
|
||||
n2metrics.Usage[v1.ResourceMemory] = *resource.NewQuantity(1614978816, resource.BinarySI)
|
||||
metricsClientset.Tracker().Update(gvr, n2metrics, "")
|
||||
converged := false
|
||||
for i := 0; i < 300; i++ {
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ = collector.NodeUsage(n2)
|
||||
if math.Abs(float64(900-nodesUsage[v1.ResourceCPU].MilliValue())) < 6 && math.Abs(float64(1614978816-nodesUsage[v1.ResourceMemory].Value())) < 6 {
|
||||
t.Logf("Node cpu/memory usage converged to 900+-5/1614978816+-5")
|
||||
converged = true
|
||||
break
|
||||
}
|
||||
t.Logf("The current node usage: cpu=%v, memory=%v", nodesUsage[v1.ResourceCPU].MilliValue(), nodesUsage[v1.ResourceMemory].Value())
|
||||
}
|
||||
if !converged {
|
||||
t.Fatalf("The node usage did not converged to 900+-1")
|
||||
}
|
||||
}
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
listersv1 "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
@@ -104,20 +105,29 @@ func IsReady(node *v1.Node) bool {
    return true
}

// NodeFit returns true if the provided pod can be scheduled onto the provided node.
// NodeFit returns nil if the provided pod can be scheduled onto the provided node.
// Otherwise, it returns an error explaining why the node does not fit the pod.
//
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
// This function currently considers a subset of the Kubernetes Scheduler's predicates when
// deciding if a pod would fit on a node, but more predicates may be added in the future.
// There should be no methods to modify nodes or pods in this method.
// It considers a subset of the Kubernetes Scheduler's predicates
// when deciding if a pod would fit on a node. More predicates may be added in the future.
//
// The checks are ordered from fastest to slowest to reduce unnecessary computation,
// especially for nodes that are clearly unsuitable early in the evaluation process.
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) error {
|
||||
// Check node selector and required affinity
|
||||
// Check if the node is marked as unschedulable.
|
||||
if IsNodeUnschedulable(node) {
|
||||
return errors.New("node is not schedulable")
|
||||
}
|
||||
|
||||
// Check if the pod matches the node's label selector (nodeSelector) and required node affinity rules.
|
||||
if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
|
||||
return err
|
||||
} else if !ok {
|
||||
return errors.New("pod node selector does not match the node label")
|
||||
}
|
||||
|
||||
// Check taints (we only care about NoSchedule and NoExecute taints)
|
||||
// Check taints on the node that have effect NoSchedule or NoExecute.
|
||||
ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
|
||||
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
|
||||
})
|
||||
@@ -125,25 +135,21 @@ func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v
|
||||
return errors.New("pod does not tolerate taints on the node")
|
||||
}
|
||||
|
||||
// Check if the pod can fit on a node based off it's requests
|
||||
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
|
||||
if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
|
||||
return reqError
|
||||
}
|
||||
}
|
||||
|
||||
// Check if node is schedulable
|
||||
if IsNodeUnschedulable(node) {
|
||||
return errors.New("node is not schedulable")
|
||||
}
|
||||
|
||||
// Check if pod matches inter-pod anti-affinity rule of pod on node
|
||||
// Check if the pod violates any inter-pod anti-affinity rules with existing pods on the node.
|
||||
// This involves iterating over all pods assigned to the node and evaluating label selectors.
|
||||
if match, err := podMatchesInterPodAntiAffinity(nodeIndexer, pod, node); err != nil {
|
||||
return err
|
||||
} else if match {
|
||||
return errors.New("pod matches inter-pod anti-affinity rule of other pod on node")
|
||||
}
|
||||
|
||||
// Check whether the node has enough available resources to accommodate the pod.
|
||||
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
|
||||
if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
|
||||
return reqError
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
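With the checks reordered, a caller can treat the first non-nil error from NodeFit as the cheapest reason the node was rejected. A hedged sketch of filtering candidate nodes this way (the helper name is hypothetical, same package assumed):

// fittingNodes keeps only the nodes the pod could still be scheduled onto.
func fittingNodes(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) []*v1.Node {
    var fits []*v1.Node
    for _, n := range nodes {
        if err := NodeFit(nodeIndexer, pod, n); err != nil {
            continue // err names the first failed check, e.g. "node is not schedulable"
        }
        fits = append(fits, n)
    }
    return fits
}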
|
||||
|
||||
@@ -213,12 +219,17 @@ func IsNodeUnschedulable(node *v1.Node) bool {
|
||||
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||
// Get pod requests
|
||||
podRequests, _ := utils.PodRequestsAndLimits(pod)
|
||||
resourceNames := make([]v1.ResourceName, 0, len(podRequests))
|
||||
resourceNames := []v1.ResourceName{v1.ResourcePods}
|
||||
for name := range podRequests {
|
||||
resourceNames = append(resourceNames, name)
|
||||
}
|
||||
|
||||
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames)
|
||||
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames,
|
||||
func(pod *v1.Pod) (v1.ResourceList, error) {
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
return req, nil
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -230,8 +241,8 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
|
||||
return false, fmt.Errorf("insufficient %v", resource)
|
||||
}
|
||||
}
|
||||
// check pod num, at least one pod number is avaibalbe
|
||||
if availableResources[v1.ResourcePods].MilliValue() <= 0 {
|
||||
// check pod num, at least one pod number is available
|
||||
if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 {
|
||||
return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
|
||||
}
|
||||
|
||||
@@ -239,19 +250,27 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
|
||||
}
|
||||
|
||||
// nodeAvailableResources returns resources mapped to the quanitity available on the node.
|
||||
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
|
||||
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeUtilization := NodeUtilization(podsOnNode, resourceNames)
|
||||
remainingResources := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
|
||||
nodeUtilization, err := NodeUtilization(podsOnNode, resourceNames, podUtilization)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remainingResources := api.ReferencedResourceList{}
|
||||
for _, name := range resourceNames {
|
||||
if !IsBasicResource(name) {
|
||||
if IsBasicResource(name) {
|
||||
switch name {
|
||||
case v1.ResourceCPU:
|
||||
remainingResources[name] = resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI)
|
||||
case v1.ResourceMemory:
|
||||
remainingResources[name] = resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI)
|
||||
case v1.ResourcePods:
|
||||
remainingResources[name] = resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI)
|
||||
}
|
||||
} else {
|
||||
if _, exists := node.Status.Allocatable[name]; exists {
|
||||
allocatableResource := node.Status.Allocatable[name]
|
||||
remainingResources[name] = resource.NewQuantity(allocatableResource.Value()-nodeUtilization[name].Value(), resource.DecimalSI)
|
||||
@@ -265,31 +284,37 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
|
||||
}
|
||||
|
||||
// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
|
||||
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
|
||||
totalReqs := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
|
||||
}
|
||||
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
|
||||
totalUtilization := api.ReferencedResourceList{}
|
||||
for _, name := range resourceNames {
|
||||
if !IsBasicResource(name) {
|
||||
totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
switch name {
|
||||
case v1.ResourceCPU:
|
||||
totalUtilization[name] = resource.NewMilliQuantity(0, resource.DecimalSI)
|
||||
case v1.ResourceMemory:
|
||||
totalUtilization[name] = resource.NewQuantity(0, resource.BinarySI)
|
||||
case v1.ResourcePods:
|
||||
totalUtilization[name] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
|
||||
default:
|
||||
totalUtilization[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
|
||||
for _, pod := range pods {
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
podUtil, err := podUtilization(pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, name := range resourceNames {
|
||||
quantity, ok := req[name]
|
||||
quantity, ok := podUtil[name]
|
||||
if ok && name != v1.ResourcePods {
|
||||
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
|
||||
// the format of the quantity will be updated to the format of y.
|
||||
totalReqs[name].Add(quantity)
|
||||
totalUtilization[name].Add(quantity)
|
||||
}
|
||||
}
|
||||
    }

    return totalReqs
    return totalUtilization, nil
}
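The new podUtilization callback decouples NodeUtilization from pod requests, so the same aggregation can run over either requested resources or live metrics. A request-based callback, mirroring the closure fitsRequest passes above, might look like this (sketch only):

requestsUtilization := func(pod *v1.Pod) (v1.ResourceList, error) {
    req, _ := utils.PodRequestsAndLimits(pod)
    return req, nil
}
usage, err := NodeUtilization(podsOnNode, []v1.ResourceName{v1.ResourcePods, v1.ResourceCPU, v1.ResourceMemory}, requestsUtilization)
// usage[v1.ResourceCPU] etc. then hold the summed quantities for the node.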
|
||||
|
||||
// IsBasicResource checks if resource is basic native.
|
||||
|
||||
@@ -25,9 +25,11 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/utils/ptr"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
@@ -78,9 +80,24 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
|
||||
sharedInformerFactory.WaitForCacheSync(stopChannel)
|
||||
defer close(stopChannel)
|
||||
|
||||
// First verify nodeLister returns non-empty list
|
||||
allNodes, err := nodeLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to list nodes from nodeLister: %v", err)
|
||||
}
|
||||
if len(allNodes) == 0 {
|
||||
t.Fatal("Expected nodeLister to return non-empty list of nodes")
|
||||
}
|
||||
if len(allNodes) != 2 {
|
||||
t.Errorf("Expected nodeLister to return 2 nodes, got %d", len(allNodes))
|
||||
}
|
||||
|
||||
// Now test ReadyNodes
|
||||
nodes, _ := ReadyNodes(ctx, fakeClient, nodeLister, nodeSelector)
|
||||
|
||||
if nodes[0].Name != "node1" {
|
||||
if len(nodes) != 1 {
|
||||
t.Errorf("Expected 1 node, got %d", len(nodes))
|
||||
} else if nodes[0].Name != "node1" {
|
||||
t.Errorf("Expected node1, got %s", nodes[0].Name)
|
||||
}
|
||||
}
|
||||
@@ -1020,6 +1037,64 @@ func TestNodeFit(t *testing.T) {
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
},
|
||||
{
|
||||
description: "Pod with native sidecars with too much cpu does not fit on node",
|
||||
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
|
||||
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
|
||||
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: createResourceList(100000, 100*1000*1000, 0),
|
||||
},
|
||||
})
|
||||
}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
err: errors.New("insufficient cpu"),
|
||||
},
|
||||
{
|
||||
description: "Pod with native sidecars with too much memory does not fit on node",
|
||||
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
|
||||
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
|
||||
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: createResourceList(100, 1000*1000*1000*1000, 0),
|
||||
},
|
||||
})
|
||||
}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
err: errors.New("insufficient memory"),
|
||||
},
|
||||
{
|
||||
description: "Pod with small native sidecars fits on node",
|
||||
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
|
||||
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
|
||||
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: createResourceList(100, 100*1000*1000, 0),
|
||||
},
|
||||
})
|
||||
}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
},
|
||||
{
|
||||
description: "Pod with large overhead does not fit on node",
|
||||
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
|
||||
pod.Spec.Overhead = createResourceList(100000, 100*1000*1000, 0)
|
||||
}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
err: errors.New("insufficient cpu"),
|
||||
},
|
||||
{
|
||||
description: "Pod with small overhead fits on node",
|
||||
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
|
||||
pod.Spec.Overhead = createResourceList(1, 1*1000*1000, 0)
|
||||
}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
evictionutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -39,6 +40,9 @@ type FilterFunc func(*v1.Pod) bool
// as input and returns the pods that assigned to the node.
type GetPodsAssignedToNodeFunc func(string, FilterFunc) ([]*v1.Pod, error)

// PodUtilizationFnc is a function for getting a pod's utilization, e.g. requested resources or utilization from metrics.
type PodUtilizationFnc func(pod *v1.Pod) (v1.ResourceList, error)

// WrapFilterFuncs wraps a set of FilterFunc in one.
|
||||
func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
|
||||
return func(pod *v1.Pod) bool {
|
||||
@@ -251,14 +255,32 @@ func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
|
||||
return false
|
||||
}
|
||||
if (pods[j].Spec.Priority == nil && pods[i].Spec.Priority == nil) || (*pods[i].Spec.Priority == *pods[j].Spec.Priority) {
|
||||
if IsBestEffortPod(pods[i]) {
|
||||
iIsBestEffortPod := IsBestEffortPod(pods[i])
|
||||
jIsBestEffortPod := IsBestEffortPod(pods[j])
|
||||
iIsBurstablePod := IsBurstablePod(pods[i])
|
||||
jIsBurstablePod := IsBurstablePod(pods[j])
|
||||
iIsGuaranteedPod := IsGuaranteedPod(pods[i])
|
||||
jIsGuaranteedPod := IsGuaranteedPod(pods[j])
|
||||
if (iIsBestEffortPod && jIsBestEffortPod) || (iIsBurstablePod && jIsBurstablePod) || (iIsGuaranteedPod && jIsGuaranteedPod) {
|
||||
iHasNoEvictonPolicy := evictionutils.HaveNoEvictionAnnotation(pods[i])
|
||||
jHasNoEvictonPolicy := evictionutils.HaveNoEvictionAnnotation(pods[j])
|
||||
if !iHasNoEvictonPolicy {
|
||||
return true
|
||||
}
|
||||
if !jHasNoEvictonPolicy {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
if IsBurstablePod(pods[i]) && IsGuaranteedPod(pods[j]) {
|
||||
if iIsBestEffortPod {
|
||||
return true
|
||||
}
|
||||
if iIsBurstablePod && jIsGuaranteedPod {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
return *pods[i].Spec.Priority < *pods[j].Spec.Priority
|
||||
})
|
||||
}
|
||||
|
||||
@@ -117,6 +117,14 @@ func TestListPodsOnANode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func getPodListNames(pods []*v1.Pod) []string {
|
||||
names := []string{}
|
||||
for _, pod := range pods {
|
||||
names = append(names, pod.Name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
|
||||
|
||||
@@ -149,11 +157,70 @@ func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
|
||||
p6 := test.BuildTestPod("p6", 400, 100, n1.Name, test.MakeGuaranteedPod)
|
||||
p6.Spec.Priority = nil
|
||||
|
||||
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
|
||||
p7 := test.BuildTestPod("p7", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, lowPriority)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// BestEffort
|
||||
p8 := test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeBestEffortPod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p9 := test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeBurstablePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// Guaranteed
|
||||
p10 := test.BuildTestPod("p10", 400, 100, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeGuaranteedPod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p11 := test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.MakeBurstablePod(pod)
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p12 := test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.MakeBurstablePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
podList := []*v1.Pod{p1, p8, p9, p10, p2, p3, p4, p5, p6, p7, p11, p12}
|
||||
// p5: no priority, best effort
|
||||
// p11: no priority, burstable
|
||||
// p6: no priority, guaranteed
|
||||
// p1: low priority
|
||||
// p7: low priority, prefer-no-eviction
|
||||
// p2: high priority, best effort
|
||||
// p8: high priority, best effort, prefer-no-eviction
|
||||
// p3: high priority, burstable
|
||||
// p9: high priority, burstable, prefer-no-eviction
|
||||
// p4: high priority, guaranteed
|
||||
// p10: high priority, guaranteed, prefer-no-eviction
|
||||
expectedPodList := []*v1.Pod{p5, p11, p12, p6, p1, p7, p2, p8, p3, p9, p4, p10}
|
||||
|
||||
SortPodsBasedOnPriorityLowToHigh(podList)
|
||||
if !reflect.DeepEqual(podList[len(podList)-1], p4) {
|
||||
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
|
||||
if !reflect.DeepEqual(getPodListNames(podList), getPodListNames(expectedPodList)) {
|
||||
t.Errorf("Pods were sorted in an unexpected order: %v, expected %v", getPodListNames(podList), getPodListNames(expectedPodList))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ package descheduler
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
@@ -62,21 +63,22 @@ func decode(policyConfigFile string, policy []byte, client clientset.Interface,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
setDefaults(*internalPolicy, registry, client)
|
||||
|
||||
return internalPolicy, nil
|
||||
return setDefaults(*internalPolicy, registry, client)
|
||||
}
|
||||
|
||||
func setDefaults(in api.DeschedulerPolicy, registry pluginregistry.Registry, client clientset.Interface) *api.DeschedulerPolicy {
|
||||
func setDefaults(in api.DeschedulerPolicy, registry pluginregistry.Registry, client clientset.Interface) (*api.DeschedulerPolicy, error) {
|
||||
var err error
|
||||
for idx, profile := range in.Profiles {
|
||||
// If we need to set defaults coming from loadtime in each profile we do it here
|
||||
in.Profiles[idx] = setDefaultEvictor(profile, client)
|
||||
in.Profiles[idx], err = setDefaultEvictor(profile, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, pluginConfig := range profile.PluginConfigs {
|
||||
setDefaultsPluginConfig(&pluginConfig, registry)
|
||||
}
|
||||
}
|
||||
return &in
|
||||
return &in, nil
|
||||
}
|
||||
|
||||
func setDefaultsPluginConfig(pluginConfig *api.PluginConfig, registry pluginregistry.Registry) {
|
||||
@@ -97,7 +99,7 @@ func findPluginName(names []string, key string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interface) api.DeschedulerProfile {
|
||||
func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interface) (api.DeschedulerProfile, error) {
|
||||
newPluginConfig := api.PluginConfig{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
@@ -105,6 +107,11 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
|
||||
EvictSystemCriticalPods: false,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: false,
|
||||
IgnorePodsWithoutPDB: false,
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
DefaultDisabled: []defaultevictor.PodProtection{},
|
||||
ExtraEnabled: []defaultevictor.PodProtection{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -127,18 +134,19 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
|
||||
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), client, defaultevictorPluginConfig.Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold)
|
||||
if err != nil {
|
||||
klog.Error(err, "Failed to get threshold priority from args")
|
||||
return profile, err
|
||||
}
|
||||
profile.PluginConfigs[idx].Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold = &api.PriorityThreshold{}
|
||||
profile.PluginConfigs[idx].Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold.Value = &thresholdPriority
|
||||
return profile
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginregistry.Registry) error {
|
||||
var errorsInProfiles []error
|
||||
var errorsInPolicy []error
|
||||
for _, profile := range in.Profiles {
|
||||
for _, pluginConfig := range profile.PluginConfigs {
|
||||
if _, ok := registry[pluginConfig.Name]; !ok {
|
||||
errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -147,9 +155,41 @@ func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginr
|
||||
continue
|
||||
}
|
||||
if err := pluginUtilities.PluginArgValidator(pluginConfig.Args); err != nil {
|
||||
errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: %s", profile.Name, err.Error()))
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("in profile %s: %s", profile.Name, err.Error()))
|
||||
}
|
||||
}
|
||||
}
|
||||
return utilerrors.NewAggregate(errorsInProfiles)
|
||||
    providers := map[api.MetricsSource]api.MetricsProvider{}
    for _, provider := range in.MetricsProviders {
        if _, ok := providers[provider.Source]; ok {
            errorsInPolicy = append(errorsInPolicy, fmt.Errorf("metric provider %q is already configured, each source can be configured only once", provider.Source))
        } else {
            providers[provider.Source] = provider
        }
    }
    if _, exists := providers[api.KubernetesMetrics]; exists && in.MetricsCollector != nil && in.MetricsCollector.Enabled {
        errorsInPolicy = append(errorsInPolicy, fmt.Errorf("it is not allowed to combine metrics provider when metrics collector is enabled"))
    }
    if prometheusConfig, exists := providers[api.PrometheusMetrics]; exists {
        if prometheusConfig.Prometheus == nil {
            errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus configuration is required when prometheus source is enabled"))
        } else {
            if prometheusConfig.Prometheus.URL == "" {
                errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus URL is required when prometheus is enabled"))
            } else if _, err := url.Parse(prometheusConfig.Prometheus.URL); err != nil {
                errorsInPolicy = append(errorsInPolicy, fmt.Errorf("error parsing prometheus URL: %v", err))
            }

            if prometheusConfig.Prometheus.AuthToken != nil {
                secretRef := prometheusConfig.Prometheus.AuthToken.SecretReference
                if secretRef == nil {
                    errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus authToken secret is expected to be set when authToken field is"))
                } else if secretRef.Name == "" || secretRef.Namespace == "" {
                    errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"))
                }
            }
        }
    }

    return utilerrors.NewAggregate(errorsInPolicy)
}
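For reference, a metrics provider that satisfies every Prometheus check above would be built along these lines (values are illustrative only):

provider := api.MetricsProvider{
    Source: api.PrometheusMetrics,
    Prometheus: &api.Prometheus{
        URL: "https://prometheus.example.com:9090",
        AuthToken: &api.AuthToken{
            SecretReference: &api.SecretReference{
                Namespace: "kube-system",
                Name:      "prometheus-token",
            },
        },
    },
}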
|
||||
|
||||
@@ -17,10 +17,12 @@ limitations under the License.
|
||||
package descheduler
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"k8s.io/apimachinery/pkg/conversion"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
@@ -121,6 +123,25 @@ profiles:
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "v1alpha2 to internal, validate error handling (priorityThreshold exceeding maximum)",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
priorityThreshold:
|
||||
value: 2000000001
|
||||
plugins:
|
||||
deschedule:
|
||||
enabled:
|
||||
- "RemovePodsHavingTooManyRestarts"
|
||||
`),
|
||||
result: nil,
|
||||
err: errors.New("priority threshold can't be greater than 2000000000"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@@ -191,12 +212,153 @@ func TestValidateDeschedulerConfiguration(t *testing.T) {
|
||||
},
|
||||
result: fmt.Errorf("[in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set, in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set]"),
|
||||
},
|
||||
{
|
||||
description: "Duplicit metrics providers error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{Source: api.KubernetesMetrics},
|
||||
{Source: api.KubernetesMetrics},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("metric provider \"KubernetesMetrics\" is already configured, each source can be configured only once"),
|
||||
},
|
||||
{
|
||||
description: "Too many metrics providers error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsCollector: &api.MetricsCollector{
|
||||
Enabled: true,
|
||||
},
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{Source: api.KubernetesMetrics},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("it is not allowed to combine metrics provider when metrics collector is enabled"),
|
||||
},
|
||||
{
|
||||
description: "missing prometheus url error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("prometheus URL is required when prometheus is enabled"),
|
||||
},
|
||||
{
|
||||
description: "prometheus url is not valid error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "http://example.com:-80",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("error parsing prometheus URL: parse \"http://example.com:-80\": invalid port \":-80\" after host"),
|
||||
},
|
||||
{
|
||||
description: "prometheus authtoken with no secret reference error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "https://example.com:80",
|
||||
AuthToken: &api.AuthToken{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("prometheus authToken secret is expected to be set when authToken field is"),
|
||||
},
|
||||
{
|
||||
description: "prometheus authtoken with empty secret reference error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "https://example.com:80",
|
||||
AuthToken: &api.AuthToken{
|
||||
SecretReference: &api.SecretReference{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
|
||||
},
|
||||
{
|
||||
description: "prometheus authtoken missing secret reference namespace error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "https://example.com:80",
|
||||
AuthToken: &api.AuthToken{
|
||||
SecretReference: &api.SecretReference{
|
||||
Name: "secretname",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
|
||||
},
|
||||
{
|
||||
description: "prometheus authtoken missing secret reference name error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "https://example.com:80",
|
||||
AuthToken: &api.AuthToken{
|
||||
SecretReference: &api.SecretReference{
|
||||
Namespace: "secretnamespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
|
||||
},
|
||||
{
|
||||
description: "valid prometheus authtoken secret reference",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "https://example.com:80",
|
||||
AuthToken: &api.AuthToken{
|
||||
SecretReference: &api.SecretReference{
|
||||
Name: "secretname",
|
||||
Namespace: "secretnamespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
result := validateDeschedulerConfiguration(tc.deschedulerPolicy, pluginregistry.PluginRegistry)
|
||||
if result.Error() != tc.result.Error() {
|
||||
if result == nil && tc.result != nil || result != nil && tc.result == nil {
|
||||
t.Errorf("test '%s' failed. expected \n'%s', got \n'%s'", tc.description, tc.result, result)
|
||||
} else if result == nil && tc.result == nil {
|
||||
return
|
||||
} else if result.Error() != tc.result.Error() {
|
||||
t.Errorf("test '%s' failed. expected \n'%s', got \n'%s'", tc.description, tc.result, result)
|
||||
}
|
||||
})
|
||||
@@ -335,6 +497,313 @@ profiles:
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test DisabledDefaultPodProtections configuration",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "ProfileName",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
DefaultDisabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithLocalStorage,
|
||||
defaultevictor.DaemonSetPods,
|
||||
},
|
||||
},
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test podProtections extraEnabled configuration",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- "PodsWithoutPDB"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "ProfileName",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
ExtraEnabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithPVC,
|
||||
defaultevictor.PodsWithoutPDB,
|
||||
},
|
||||
},
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test both ExtraPodProtections and DisabledDefaultPodProtections configuration",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- "PodsWithoutPDB"
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "ProfileName",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
ExtraEnabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithPVC,
|
||||
defaultevictor.PodsWithoutPDB,
|
||||
},
|
||||
DefaultDisabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithLocalStorage,
|
||||
defaultevictor.DaemonSetPods,
|
||||
},
|
||||
},
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test error when using both Deprecated fields and DisabledDefaultPodProtections/ExtraPodProtections",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
evictSystemCriticalPods: true
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- "PodsWithoutPDB"
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
|
||||
},
|
||||
{
|
||||
description: "test error when Disables a default protection that does not exist",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "InvalidProtection"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in DefaultDisabled: \"InvalidProtection\". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]"),
|
||||
},
|
||||
{
|
||||
description: "test error when Enables an extra protection that does not exist",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "InvalidProtection"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in ExtraEnabled: \"InvalidProtection\". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]"),
|
||||
},
|
||||
{
|
||||
description: "test error when Disables an extra protection",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "PodsWithPVC"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in DefaultDisabled: \"PodsWithPVC\". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]"),
|
||||
},
|
||||
{
|
||||
description: "test error when Enables a default protection",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in ExtraEnabled: \"DaemonSetPods\". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@@ -342,14 +811,14 @@ profiles:
|
||||
result, err := decode("filename", tc.policy, client, pluginregistry.PluginRegistry)
|
||||
if err != nil {
|
||||
if tc.err == nil {
|
||||
t.Errorf("unexpected error: %s.", err.Error())
|
||||
} else {
|
||||
t.Errorf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
|
||||
t.Fatalf("unexpected error: %s.", err.Error())
|
||||
} else if err.Error() != tc.err.Error() {
|
||||
t.Fatalf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
|
||||
}
|
||||
}
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" && err == nil {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
if diff != "" {
|
||||
t.Fatalf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
49 pkg/features/features.go Normal file
@@ -0,0 +1,49 @@
|
||||
package features
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/component-base/featuregate"
|
||||
)
|
||||
|
||||
const (
|
||||
// Every feature gate should add method here following this template:
|
||||
//
|
||||
// // owner: @username
|
||||
// // kep: kep link
|
||||
// // alpha: v1.X
|
||||
// MyFeature featuregate.Feature = "MyFeature"
|
||||
//
|
||||
// Feature gates should be listed in alphabetical, case-sensitive
|
||||
// (upper before any lower case character) order. This reduces the risk
|
||||
// of code conflicts because changes are more likely to be scattered
|
||||
// across the file.
|
||||
|
||||
// owner: @ingvagabund
|
||||
// kep: https://github.com/kubernetes-sigs/descheduler/issues/1397
|
||||
// alpha: v1.31
|
||||
//
|
||||
// Enable evictions in background so users can create their own eviction policies
|
||||
// as an alternative to immediate evictions.
|
||||
EvictionsInBackground featuregate.Feature = "EvictionsInBackground"
|
||||
)
|
||||
|
||||
func init() {
|
||||
runtime.Must(DefaultMutableFeatureGate.Add(defaultDeschedulerFeatureGates))
|
||||
}
|
||||
|
||||
// defaultDeschedulerFeatureGates consists of all known descheduler-specific feature keys.
|
||||
// To add a new feature, define a key for it above and add it here. The features will be
|
||||
// available throughout descheduler binary.
|
||||
//
|
||||
// Entries are separated from each other with blank lines to avoid sweeping gofmt changes
|
||||
// when adding or removing one entry.
|
||||
var defaultDeschedulerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
|
||||
}
|
||||
|
||||
// DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate.
|
||||
// Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this.
|
||||
// Tests that need to modify feature gates for the duration of their test should use:
|
||||
//
|
||||
// defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)()
|
||||
var DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()
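Code elsewhere in the binary would consult the gate before taking the gated path; a minimal sketch from another package, assuming it imports sigs.k8s.io/descheduler/pkg/features:

// evictInBackground reports whether the alpha EvictionsInBackground gate is enabled.
func evictInBackground() bool {
    return features.DefaultMutableFeatureGate.Enabled(features.EvictionsInBackground)
}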
|
||||
@@ -8,8 +8,11 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
)
|
||||
|
||||
type HandleImpl struct {
|
||||
@@ -18,6 +21,8 @@ type HandleImpl struct {
|
||||
SharedInformerFactoryImpl informers.SharedInformerFactory
|
||||
EvictorFilterImpl frameworktypes.EvictorPlugin
|
||||
PodEvictorImpl *evictions.PodEvictor
|
||||
MetricsCollectorImpl *metricscollector.MetricsCollector
|
||||
PrometheusClientImpl promapi.Client
|
||||
}
|
||||
|
||||
var _ frameworktypes.Handle = &HandleImpl{}
|
||||
@@ -26,6 +31,14 @@ func (hi *HandleImpl) ClientSet() clientset.Interface {
|
||||
return hi.ClientsetImpl
|
||||
}
|
||||
|
||||
func (hi *HandleImpl) PrometheusClient() promapi.Client {
|
||||
return hi.PrometheusClientImpl
|
||||
}
|
||||
|
||||
func (hi *HandleImpl) MetricsCollector() *metricscollector.MetricsCollector {
|
||||
return hi.MetricsCollectorImpl
|
||||
}
|
||||
|
||||
func (hi *HandleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
|
||||
return hi.GetPodsAssignedToNodeFuncImpl
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ type FakePlugin struct {
|
||||
}
|
||||
|
||||
func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
@@ -74,7 +74,7 @@ func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
@@ -165,7 +165,7 @@ type FakeDeschedulePlugin struct {
|
||||
}
|
||||
|
||||
func NewFakeDeschedulePluginFncFromFake(fp *FakeDeschedulePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeDeschedulePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeDeschedulePluginArgs, got %T", args)
|
||||
@@ -252,7 +252,7 @@ type FakeBalancePlugin struct {
|
||||
}
|
||||
|
||||
func NewFakeBalancePluginFncFromFake(fp *FakeBalancePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeBalancePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeBalancePluginArgs, got %T", args)
|
||||
@@ -339,7 +339,7 @@ type FakeFilterPlugin struct {
|
||||
}
|
||||
|
||||
func NewFakeFilterPluginFncFromFake(fp *FakeFilterPlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeFilterPluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeFilterPluginArgs, got %T", args)
|
||||
@@ -408,3 +408,55 @@ func (d *FakeFilterPlugin) handleBoolAction(action Action) bool {
|
||||
}
|
||||
panic(fmt.Errorf("unhandled %q action", action.GetExtensionPoint()))
|
||||
}
|
||||
|
||||
// RegisterFakePlugin registers a FakePlugin with the given registry
|
||||
func RegisterFakePlugin(name string, plugin *FakePlugin, registry pluginregistry.Registry) {
|
||||
pluginregistry.Register(
|
||||
name,
|
||||
NewPluginFncFromFake(plugin),
|
||||
&FakePlugin{},
|
||||
&FakePluginArgs{},
|
||||
ValidateFakePluginArgs,
|
||||
SetDefaults_FakePluginArgs,
|
||||
registry,
|
||||
)
|
||||
}
|
||||
|
||||
// RegisterFakeDeschedulePlugin registers a FakeDeschedulePlugin with the given registry
|
||||
func RegisterFakeDeschedulePlugin(name string, plugin *FakeDeschedulePlugin, registry pluginregistry.Registry) {
|
||||
pluginregistry.Register(
|
||||
name,
|
||||
NewFakeDeschedulePluginFncFromFake(plugin),
|
||||
&FakeDeschedulePlugin{},
|
||||
&FakeDeschedulePluginArgs{},
|
||||
ValidateFakePluginArgs,
|
||||
SetDefaults_FakePluginArgs,
|
||||
registry,
|
||||
)
|
||||
}
|
||||
|
||||
// RegisterFakeBalancePlugin registers a FakeBalancePlugin with the given registry
|
||||
func RegisterFakeBalancePlugin(name string, plugin *FakeBalancePlugin, registry pluginregistry.Registry) {
|
||||
pluginregistry.Register(
|
||||
name,
|
||||
NewFakeBalancePluginFncFromFake(plugin),
|
||||
&FakeBalancePlugin{},
|
||||
&FakeBalancePluginArgs{},
|
||||
ValidateFakePluginArgs,
|
||||
SetDefaults_FakePluginArgs,
|
||||
registry,
|
||||
)
|
||||
}
|
||||
|
||||
// RegisterFakeFilterPlugin registers a FakeFilterPlugin with the given registry
|
||||
func RegisterFakeFilterPlugin(name string, plugin *FakeFilterPlugin, registry pluginregistry.Registry) {
|
||||
pluginregistry.Register(
|
||||
name,
|
||||
NewFakeFilterPluginFncFromFake(plugin),
|
||||
&FakeFilterPlugin{},
|
||||
&FakeFilterPluginArgs{},
|
||||
ValidateFakePluginArgs,
|
||||
SetDefaults_FakePluginArgs,
|
||||
registry,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -17,6 +17,8 @@ limitations under the License.
|
||||
package pluginregistry
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
@@ -35,7 +37,7 @@ type PluginUtilities struct {
|
||||
PluginArgDefaulter PluginArgDefaulter
|
||||
}
|
||||
|
||||
type PluginBuilder = func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error)
|
||||
type PluginBuilder = func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error)
|
||||
|
||||
type (
|
||||
PluginArgValidator = func(args runtime.Object) error
|
||||
|
||||
94 pkg/framework/plugins/defaultevictor/constraints.go Normal file
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaultevictor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
func evictionConstraintsForLabelSelector(logger klog.Logger, labelSelector *metav1.LabelSelector) ([]constraint, error) {
|
||||
if labelSelector != nil {
|
||||
selector, err := metav1.LabelSelectorAsSelector(labelSelector)
|
||||
if err != nil {
|
||||
logger.Error(err, "could not get selector from label selector")
|
||||
return nil, err
|
||||
}
|
||||
if !selector.Empty() {
|
||||
return []constraint{
|
||||
func(pod *v1.Pod) error {
|
||||
if !selector.Matches(labels.Set(pod.Labels)) {
|
||||
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func evictionConstraintsForMinReplicas(logger klog.Logger, minReplicas uint, handle frameworktypes.Handle) ([]constraint, error) {
|
||||
if minReplicas > 1 {
|
||||
indexName := "metadata.ownerReferences"
|
||||
indexer, err := getPodIndexerByOwnerRefs(indexName, handle)
|
||||
if err != nil {
|
||||
logger.Error(err, "could not get pod indexer by ownerRefs")
|
||||
return nil, err
|
||||
}
|
||||
return []constraint{
|
||||
func(pod *v1.Pod) error {
|
||||
if len(pod.OwnerReferences) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(pod.OwnerReferences) > 1 {
|
||||
logger.V(5).Info("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
|
||||
return nil
|
||||
}
|
||||
ownerRef := pod.OwnerReferences[0]
|
||||
objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods for minReplicas filter in the policy parameter")
|
||||
}
|
||||
if uint(len(objs)) < minReplicas {
|
||||
return fmt.Errorf("owner has %d replicas which is less than minReplicas of %d", len(objs), minReplicas)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}, nil
|
||||
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func evictionConstraintsForMinPodAge(minPodAge *metav1.Duration) []constraint {
|
||||
if minPodAge != nil {
|
||||
return []constraint{
|
||||
func(pod *v1.Pod) error {
|
||||
if pod.Status.StartTime == nil || time.Since(pod.Status.StartTime.Time) < minPodAge.Duration {
|
||||
return fmt.Errorf("pod age is not older than MinPodAge: %s seconds", minPodAge.String())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
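Each of these helpers returns zero or more constraint funcs (type constraint func(pod *v1.Pod) error); at filter time the evictor can simply run a pod through all of them, roughly as sketched below:

// checkConstraints returns the first constraint violation, or nil if the pod passes all of them.
func checkConstraints(pod *v1.Pod, constraints []constraint) error {
    for _, c := range constraints {
        if err := c(pod); err != nil {
            return err
        }
    }
    return nil
}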
|
||||
@@ -14,19 +14,20 @@ limitations under the License.
|
||||
package defaultevictor
|
||||
|
||||
import (
|
||||
// "context"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
"maps"
|
||||
"slices"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
evictionutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
@@ -48,6 +49,7 @@ type constraint func(pod *v1.Pod) error
|
||||
// This plugin is only meant to customize other actions (extension points) of the evictor,
|
||||
// like filtering, sorting, and other ones that might be relevant in the future
|
||||
type DefaultEvictor struct {
|
||||
logger klog.Logger
|
||||
args *DefaultEvictorArgs
|
||||
constraints []constraint
|
||||
handle frameworktypes.Handle
|
||||
@@ -66,136 +68,326 @@ func HaveEvictAnnotation(pod *v1.Pod) bool {
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
// nolint: gocyclo
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
defaultEvictorArgs, ok := args.(*DefaultEvictorArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
ev := &DefaultEvictor{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: defaultEvictorArgs,
|
||||
}
|
||||
// add constraints
|
||||
err := ev.addAllConstraints(logger, handle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.EvictFailedBarePods {
|
||||
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
func (d *DefaultEvictor) addAllConstraints(logger klog.Logger, handle frameworktypes.Handle) error {
|
||||
args := d.args
|
||||
// Determine effective protected policies based on the provided arguments.
|
||||
effectivePodProtections := getEffectivePodProtections(args)
|
||||
|
||||
if err := applyEffectivePodProtections(d, effectivePodProtections, handle); err != nil {
|
||||
return fmt.Errorf("failed to apply effective protected policies: %w", err)
|
||||
}
|
||||
if constraints, err := evictionConstraintsForLabelSelector(logger, args.LabelSelector); err != nil {
|
||||
return err
|
||||
} else {
|
||||
d.constraints = append(d.constraints, constraints...)
|
||||
}
|
||||
if constraints, err := evictionConstraintsForMinReplicas(logger, args.MinReplicas, handle); err != nil {
|
||||
return err
|
||||
} else {
|
||||
d.constraints = append(d.constraints, constraints...)
|
||||
}
|
||||
d.constraints = append(d.constraints, evictionConstraintsForMinPodAge(args.MinPodAge)...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyEffectivePodProtections configures the evictor with specified Pod protection.
|
||||
func applyEffectivePodProtections(d *DefaultEvictor, podProtections []PodProtection, handle frameworktypes.Handle) error {
|
||||
protectionMap := make(map[PodProtection]bool, len(podProtections))
|
||||
for _, protection := range podProtections {
|
||||
protectionMap[protection] = true
|
||||
}
|
||||
|
||||
// Apply protections
|
||||
if err := applySystemCriticalPodsProtection(d, protectionMap, handle); err != nil {
|
||||
return err
|
||||
}
|
||||
applyFailedBarePodsProtection(d, protectionMap)
|
||||
applyLocalStoragePodsProtection(d, protectionMap)
|
||||
applyDaemonSetPodsProtection(d, protectionMap)
|
||||
applyPVCPodsProtection(d, protectionMap)
|
||||
applyPodsWithoutPDBProtection(d, protectionMap, handle)
|
||||
applyPodsWithResourceClaimsProtection(d, protectionMap)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// protectedPVCStorageClasses returns the list of storage classes that should
|
||||
// be protected from eviction. If the list is empty or nil then all storage
|
||||
// classes are protected (assuming PodsWithPVC protection is enabled).
|
||||
func protectedPVCStorageClasses(d *DefaultEvictor) []ProtectedStorageClass {
|
||||
protcfg := d.args.PodProtections.Config
|
||||
if protcfg == nil {
|
||||
return nil
|
||||
}
|
||||
scconfig := protcfg.PodsWithPVC
|
||||
if scconfig == nil {
|
||||
return nil
|
||||
}
|
||||
return scconfig.ProtectedStorageClasses
|
||||
}
|
||||
|
||||
// podStorageClasses returns a list of storage classes referred by a pod. We
|
||||
// need this when assessing if a pod should be protected because it refers to a
|
||||
// protected storage class.
|
||||
func podStorageClasses(inf informers.SharedInformerFactory, pod *v1.Pod) ([]string, error) {
|
||||
lister := inf.Core().V1().PersistentVolumeClaims().Lister().PersistentVolumeClaims(
|
||||
pod.Namespace,
|
||||
)
|
||||
|
||||
referred := map[string]bool{}
|
||||
for _, vol := range pod.Spec.Volumes {
|
||||
if vol.PersistentVolumeClaim == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
claim, err := lister.Get(vol.PersistentVolumeClaim.ClaimName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to get persistent volume claim %q/%q: %w",
|
||||
pod.Namespace, vol.PersistentVolumeClaim.ClaimName, err,
|
||||
)
|
||||
}
|
||||
|
||||
// this should never happen as once a pvc is created with a nil
|
||||
// storageClass it is automatically picked up by the default
|
||||
// storage class. By returning an error here we make the pod
|
||||
// protected from eviction.
|
||||
if claim.Spec.StorageClassName == nil || *claim.Spec.StorageClassName == "" {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to resolve storage class for pod %q/%q",
|
||||
pod.Namespace, claim.Name,
|
||||
)
|
||||
}
|
||||
|
||||
referred[*claim.Spec.StorageClassName] = true
|
||||
}
|
||||
|
||||
return slices.Collect(maps.Keys(referred)), nil
|
||||
}
|
||||
|
||||
func applyFailedBarePodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[FailedBarePods]
|
||||
if !isProtectionEnabled {
|
||||
d.logger.V(1).Info("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
// Enable evictFailedBarePods to evict bare pods in failed phase
|
||||
if len(ownerRefList) == 0 && pod.Status.Phase != v1.PodFailed {
|
||||
return fmt.Errorf("pod does not have any ownerRefs and is not in failed phase")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
} else {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
if len(ownerRefList) == 0 {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if len(podutil.OwnerRef(pod)) == 0 {
|
||||
return fmt.Errorf("pod does not have any ownerRefs")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if !defaultEvictorArgs.EvictSystemCriticalPods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsCriticalPriorityPod(pod) {
|
||||
return fmt.Errorf("pod has system critical priority")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.PriorityThreshold != nil && (defaultEvictorArgs.PriorityThreshold.Value != nil || len(defaultEvictorArgs.PriorityThreshold.Name) > 0) {
|
||||
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), handle.ClientSet(), defaultEvictorArgs.PriorityThreshold)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get priority threshold: %v", err)
|
||||
}
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if IsPodEvictableBasedOnPriority(pod, thresholdPriority) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("pod has higher priority than specified priority class threshold")
|
||||
})
|
||||
}
|
||||
} else {
|
||||
klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
|
||||
func applySystemCriticalPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool, handle frameworktypes.Handle) error {
|
||||
isProtectionEnabled := protectionMap[SystemCriticalPods]
|
||||
if !isProtectionEnabled {
|
||||
d.logger.V(1).Info("Warning: System critical pod protection is disabled. This could cause eviction of Kubernetes system pods.")
|
||||
return nil
|
||||
}
|
||||
if !defaultEvictorArgs.EvictLocalStoragePods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithLocalStorage(pod) {
|
||||
return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
|
||||
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsCriticalPriorityPod(pod) {
|
||||
return fmt.Errorf("pod has system critical priority and is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
priorityThreshold := d.args.PriorityThreshold
|
||||
if priorityThreshold != nil && (priorityThreshold.Value != nil || len(priorityThreshold.Name) > 0) {
|
||||
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), handle.ClientSet(), priorityThreshold)
|
||||
if err != nil {
|
||||
d.logger.Error(err, "failed to get priority threshold")
|
||||
return err
|
||||
}
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if !IsPodEvictableBasedOnPriority(pod, thresholdPriority) {
|
||||
return fmt.Errorf("pod has higher priority than specified priority class threshold")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if !defaultEvictorArgs.EvictDaemonSetPods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func applyLocalStoragePodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[PodsWithLocalStorage]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithLocalStorage(pod) {
|
||||
return fmt.Errorf("pod has local storage and is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func applyDaemonSetPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[DaemonSetPods]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
if utils.IsDaemonsetPod(ownerRefList) {
|
||||
return fmt.Errorf("pod is related to daemonset and descheduler is not configured with evictDaemonSetPods")
|
||||
return fmt.Errorf("daemonset pods are protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if defaultEvictorArgs.IgnorePvcPods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithPVC(pod) {
|
||||
return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
selector, err := metav1.LabelSelectorAsSelector(defaultEvictorArgs.LabelSelector)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get selector from label selector")
|
||||
}
|
||||
if defaultEvictorArgs.LabelSelector != nil && !selector.Empty() {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if !selector.Matches(labels.Set(pod.Labels)) {
|
||||
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// applyPVCPodsProtection protects pods that refer to a PVC from eviction. If
|
||||
// the user has specified a list of storage classes to protect then only pods
|
||||
// referring to PVCs of those storage classes are protected.
|
||||
func applyPVCPodsProtection(d *DefaultEvictor, enabledProtections map[PodProtection]bool) {
|
||||
if !enabledProtections[PodsWithPVC] {
|
||||
return
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.MinReplicas > 1 {
|
||||
indexName := "metadata.ownerReferences"
|
||||
indexer, err := getPodIndexerByOwnerRefs(indexName, handle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if len(pod.OwnerReferences) == 0 {
|
||||
// if the user isn't filtering by storage classes we protect all pods
|
||||
// referring to a PVC.
|
||||
protected := protectedPVCStorageClasses(d)
|
||||
if len(protected) == 0 {
|
||||
d.constraints = append(
|
||||
d.constraints,
|
||||
func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithPVC(pod) {
|
||||
return fmt.Errorf("pod with PVC is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
},
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
if len(pod.OwnerReferences) > 1 {
|
||||
klog.V(5).InfoS("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
|
||||
return nil
|
||||
}
|
||||
protectedsc := map[string]bool{}
|
||||
for _, class := range protected {
|
||||
protectedsc[class.Name] = true
|
||||
}
|
||||
|
||||
ownerRef := pod.OwnerReferences[0]
|
||||
objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
|
||||
d.constraints = append(
|
||||
d.constraints, func(pod *v1.Pod) error {
|
||||
classes, err := podStorageClasses(d.handle.SharedInformerFactory(), pod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods for minReplicas filter in the policy parameter")
|
||||
return err
|
||||
}
|
||||
|
||||
if uint(len(objs)) < defaultEvictorArgs.MinReplicas {
|
||||
return fmt.Errorf("owner has %d replicas which is less than minReplicas of %d", len(objs), defaultEvictorArgs.MinReplicas)
|
||||
for _, class := range classes {
|
||||
if !protectedsc[class] {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("pod using protected storage class %q", class)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.MinPodAge != nil {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if pod.Status.StartTime == nil || time.Since(pod.Status.StartTime.Time) < defaultEvictorArgs.MinPodAge.Duration {
|
||||
return fmt.Errorf("pod age is not older than MinPodAge: %s seconds", defaultEvictorArgs.MinPodAge.String())
|
||||
func applyPodsWithoutPDBProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool, handle frameworktypes.Handle) {
|
||||
isProtectionEnabled := protectionMap[PodsWithoutPDB]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
hasPdb, err := utils.IsPodCoveredByPDB(pod, handle.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to check if pod is covered by PodDisruptionBudget: %w", err)
|
||||
}
|
||||
if !hasPdb {
|
||||
return fmt.Errorf("pod does not have a PodDisruptionBudget and is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
func applyPodsWithResourceClaimsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[PodsWithResourceClaims]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithResourceClaims(pod) {
|
||||
return fmt.Errorf("pod has ResourceClaims and descheduler is configured to protect ResourceClaims pods")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// getEffectivePodProtections determines which policies are currently active.
|
||||
// It supports both new-style (PodProtections) and legacy-style flags.
|
||||
func getEffectivePodProtections(args *DefaultEvictorArgs) []PodProtection {
|
||||
// determine whether to use PodProtections config
|
||||
useNewConfig := len(args.PodProtections.DefaultDisabled) > 0 || len(args.PodProtections.ExtraEnabled) > 0
|
||||
|
||||
if !useNewConfig {
|
||||
// fall back to the Deprecated config
|
||||
return legacyGetPodProtections(args)
|
||||
}
|
||||
|
||||
// effective is the final list of active protection.
|
||||
effective := make([]PodProtection, 0)
|
||||
effective = append(effective, defaultPodProtections...)
|
||||
|
||||
// Remove PodProtections that are in the DefaultDisabled list.
|
||||
effective = slices.DeleteFunc(effective, func(protection PodProtection) bool {
|
||||
return slices.Contains(args.PodProtections.DefaultDisabled, protection)
|
||||
})
|
||||
|
||||
// Add extra enabled in PodProtections
|
||||
effective = append(effective, args.PodProtections.ExtraEnabled...)
|
||||
|
||||
return effective
|
||||
}
|
||||
|
||||
// legacyGetPodProtections returns protections using deprecated boolean flags.
|
||||
func legacyGetPodProtections(args *DefaultEvictorArgs) []PodProtection {
|
||||
var protections []PodProtection
|
||||
|
||||
// defaultDisabled
|
||||
if !args.EvictLocalStoragePods {
|
||||
protections = append(protections, PodsWithLocalStorage)
|
||||
}
|
||||
if !args.EvictDaemonSetPods {
|
||||
protections = append(protections, DaemonSetPods)
|
||||
}
|
||||
if !args.EvictSystemCriticalPods {
|
||||
protections = append(protections, SystemCriticalPods)
|
||||
}
|
||||
if !args.EvictFailedBarePods {
|
||||
protections = append(protections, FailedBarePods)
|
||||
}
|
||||
|
||||
// extraEnabled
|
||||
if args.IgnorePvcPods {
|
||||
protections = append(protections, PodsWithPVC)
|
||||
}
|
||||
if args.IgnorePodsWithoutPDB {
|
||||
protections = append(protections, PodsWithoutPDB)
|
||||
}
|
||||
return protections
|
||||
}
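To make the two configuration styles concrete, here is a small test-style sketch, assumed to live inside the defaultevictor package next to the helpers above (the expected results follow from defaultPodProtections and the legacy flag mapping; `fmt` is assumed to be imported):

```go
// effectiveProtectionsSketch is illustrative only; it exercises the
// unexported helpers defined above.
func effectiveProtectionsSketch() {
	// New-style: drop one default protection, add one extra.
	newStyle := &DefaultEvictorArgs{
		PodProtections: PodProtections{
			DefaultDisabled: []PodProtection{DaemonSetPods},
			ExtraEnabled:    []PodProtection{PodsWithPVC},
		},
	}
	fmt.Println(getEffectivePodProtections(newStyle))
	// [PodsWithLocalStorage SystemCriticalPods FailedBarePods PodsWithPVC]

	// Legacy-style: the deprecated booleans are translated into protections.
	legacy := &DefaultEvictorArgs{
		EvictLocalStoragePods: true, // local-storage pods become evictable
		IgnorePvcPods:         true, // PVC-backed pods become protected
	}
	fmt.Println(getEffectivePodProtections(legacy))
	// [DaemonSetPods SystemCriticalPods FailedBarePods PodsWithPVC]
}
```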
|
||||
|
||||
// Name retrieves the plugin name
|
||||
@@ -204,14 +396,15 @@ func (d *DefaultEvictor) Name() string {
|
||||
}
|
||||
|
||||
func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
logger := d.logger.WithValues("ExtensionPoint", frameworktypes.PreEvictionFilterExtensionPoint)
|
||||
if d.args.NodeFit {
|
||||
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), d.args.NodeSelector)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "unable to list ready nodes", "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
|
||||
klog.InfoS("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
|
||||
logger.Info("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
return true
|
||||
@@ -220,12 +413,17 @@ func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
}
|
||||
|
||||
func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
|
||||
logger := d.logger.WithValues("ExtensionPoint", frameworktypes.FilterExtensionPoint)
|
||||
checkErrs := []error{}
|
||||
|
||||
if HaveEvictAnnotation(pod) {
|
||||
return true
|
||||
}
|
||||
|
||||
if d.args.NoEvictionPolicy == MandatoryNoEvictionPolicy && evictionutils.HaveNoEvictionAnnotation(pod) {
|
||||
return false
|
||||
}
|
||||
|
||||
if utils.IsMirrorPod(pod) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
|
||||
}
|
||||
@@ -245,7 +443,7 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
|
||||
}
|
||||
|
||||
if len(checkErrs) > 0 {
|
||||
klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
|
||||
logger.V(4).Info("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@@ -21,35 +21,7 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||
return RegisterDefaults(scheme)
|
||||
}
|
||||
|
||||
// SetDefaults_DefaultEvictorArgs
|
||||
// TODO: the final default values would be discussed in community
|
||||
// SetDefaults_DefaultEvictorArgs sets the default values for the
|
||||
// DefaultEvictorArgs configuration.
|
||||
func SetDefaults_DefaultEvictorArgs(obj runtime.Object) {
|
||||
args := obj.(*DefaultEvictorArgs)
|
||||
if args.NodeSelector == "" {
|
||||
args.NodeSelector = ""
|
||||
}
|
||||
if !args.EvictLocalStoragePods {
|
||||
args.EvictLocalStoragePods = false
|
||||
}
|
||||
if !args.EvictDaemonSetPods {
|
||||
args.EvictDaemonSetPods = false
|
||||
}
|
||||
if !args.EvictSystemCriticalPods {
|
||||
args.EvictSystemCriticalPods = false
|
||||
}
|
||||
if !args.IgnorePvcPods {
|
||||
args.IgnorePvcPods = false
|
||||
}
|
||||
if !args.EvictFailedBarePods {
|
||||
args.EvictFailedBarePods = false
|
||||
}
|
||||
if args.LabelSelector == nil {
|
||||
args.LabelSelector = nil
|
||||
}
|
||||
if args.PriorityThreshold == nil {
|
||||
args.PriorityThreshold = nil
|
||||
}
|
||||
if !args.NodeFit {
|
||||
args.NodeFit = false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
@@ -42,6 +43,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
|
||||
LabelSelector: nil,
|
||||
PriorityThreshold: nil,
|
||||
NodeFit: false,
|
||||
IgnorePodsWithoutPDB: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -57,7 +59,8 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: utilptr.To[int32](800),
|
||||
},
|
||||
NodeFit: true,
|
||||
NodeFit: true,
|
||||
IgnorePodsWithoutPDB: true,
|
||||
},
|
||||
want: &DefaultEvictorArgs{
|
||||
NodeSelector: "NodeSelector",
|
||||
@@ -70,7 +73,8 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: utilptr.To[int32](800),
|
||||
},
|
||||
NodeFit: true,
|
||||
NodeFit: true,
|
||||
IgnorePodsWithoutPDB: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -25,15 +25,132 @@ import (
|
||||
type DefaultEvictorArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
NodeSelector string `json:"nodeSelector,omitempty"`
|
||||
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
|
||||
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
|
||||
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
|
||||
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
|
||||
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
|
||||
NodeFit bool `json:"nodeFit,omitempty"`
|
||||
MinReplicas uint `json:"minReplicas,omitempty"`
|
||||
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
|
||||
NodeSelector string `json:"nodeSelector,omitempty"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
|
||||
NodeFit bool `json:"nodeFit,omitempty"`
|
||||
MinReplicas uint `json:"minReplicas,omitempty"`
|
||||
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
|
||||
NoEvictionPolicy NoEvictionPolicy `json:"noEvictionPolicy,omitempty"`
|
||||
|
||||
// PodProtections holds the list of enabled and disabled protection policies.
|
||||
// Users can selectively disable certain default protection rules or enable extra ones.
|
||||
PodProtections PodProtections `json:"podProtections,omitempty"`
|
||||
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "PodsWithLocalStorage" instead.
|
||||
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "DaemonSetPods" instead.
|
||||
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "SystemCriticalPods" instead.
|
||||
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
|
||||
// Deprecated: Use ExtraPodProtection with "PodsWithPVC" instead.
|
||||
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
|
||||
// Deprecated: Use ExtraPodProtection with "PodsWithoutPDB" instead.
|
||||
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "FailedBarePods" instead.
|
||||
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
|
||||
}
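For readers moving off the deprecated booleans, here is a hedged in-package sketch (the values are hypothetical) of the same intent expressed in both styles; note that validation rejects mixing the deprecated fields with the PodProtections lists:

```go
// Deprecated style: allow eviction of local-storage pods, protect PVC-backed pods.
legacyArgs := &DefaultEvictorArgs{
	EvictLocalStoragePods: true,
	IgnorePvcPods:         true,
}

// Equivalent PodProtections style.
newArgs := &DefaultEvictorArgs{
	PodProtections: PodProtections{
		DefaultDisabled: []PodProtection{PodsWithLocalStorage},
		ExtraEnabled:    []PodProtection{PodsWithPVC},
	},
}

// Both resolve to the same effective protection set:
// SystemCriticalPods, FailedBarePods, DaemonSetPods and PodsWithPVC.
_, _ = legacyArgs, newArgs
```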
|
||||
|
||||
// PodProtection defines the protection policy for a pod.
|
||||
type PodProtection string
|
||||
|
||||
const (
|
||||
PodsWithLocalStorage PodProtection = "PodsWithLocalStorage"
|
||||
DaemonSetPods PodProtection = "DaemonSetPods"
|
||||
SystemCriticalPods PodProtection = "SystemCriticalPods"
|
||||
FailedBarePods PodProtection = "FailedBarePods"
|
||||
PodsWithPVC PodProtection = "PodsWithPVC"
|
||||
PodsWithoutPDB PodProtection = "PodsWithoutPDB"
|
||||
PodsWithResourceClaims PodProtection = "PodsWithResourceClaims"
|
||||
)
|
||||
|
||||
// PodProtections holds the list of enabled and disabled protection policies.
|
||||
// NOTE: The list of default enabled pod protection policies is subject to change in future versions.
|
||||
// +k8s:deepcopy-gen=true
|
||||
type PodProtections struct {
|
||||
// ExtraEnabled specifies additional protection policies that should be enabled.
|
||||
// Supports: PodsWithPVC, PodsWithoutPDB
|
||||
ExtraEnabled []PodProtection `json:"extraEnabled,omitempty"`
|
||||
|
||||
// DefaultDisabled specifies which default protection policies should be disabled.
|
||||
// Supports: PodsWithLocalStorage, DaemonSetPods, SystemCriticalPods, FailedBarePods
|
||||
DefaultDisabled []PodProtection `json:"defaultDisabled,omitempty"`
|
||||
|
||||
// Config holds configuration for pod protection policies. Depending on
|
||||
// the enabled policies this may be required. For instance, when
|
||||
// enabling the PodsWithPVC policy the user may specify which storage
|
||||
// classes should be protected.
|
||||
Config *PodProtectionsConfig `json:"config,omitempty"`
|
||||
}
|
||||
|
||||
// PodProtectionsConfig holds configuration for pod protection policies. The
|
||||
// name of the fields here must be equal to a protection name. This struct is
|
||||
// meant to be extended as more protection policies are added.
|
||||
// +k8s:deepcopy-gen=true
|
||||
type PodProtectionsConfig struct {
|
||||
PodsWithPVC *PodsWithPVCConfig `json:"PodsWithPVC,omitempty"`
|
||||
}
|
||||
|
||||
// PodsWithPVCConfig holds configuration for the PodsWithPVC protection.
|
||||
// +k8s:deepcopy-gen=true
|
||||
type PodsWithPVCConfig struct {
|
||||
// ProtectedStorageClasses is a list of storage classes that we want to
|
||||
// protect. i.e. if a pod refers to one of these storage classes it is
|
||||
// protected from being evicted. If none is provided then all pods with
|
||||
// PVCs are protected from eviction.
|
||||
ProtectedStorageClasses []ProtectedStorageClass `json:"protectedStorageClasses,omitempty"`
|
||||
}
|
||||
|
||||
// ProtectedStorageClass is used to determine what storage classes are
|
||||
// protected when the PodsWithPVC protection is enabled. This object exists
|
||||
// so we can later on extend it with more configuration if needed.
|
||||
type ProtectedStorageClass struct {
|
||||
Name string `json:"name"`
|
||||
}
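As an in-package sketch of how these pieces fit together (the storage class names are hypothetical), enabling the PodsWithPVC protection and restricting it to two classes could look like:

```go
args := &DefaultEvictorArgs{
	PodProtections: PodProtections{
		ExtraEnabled: []PodProtection{PodsWithPVC},
		Config: &PodProtectionsConfig{
			PodsWithPVC: &PodsWithPVCConfig{
				// Only pods whose PVCs use these classes are protected;
				// leaving the list empty would protect every pod with a PVC.
				ProtectedStorageClasses: []ProtectedStorageClass{
					{Name: "ceph-rbd"},
					{Name: "longhorn"},
				},
			},
		},
	},
}
_ = args
```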
|
||||
|
||||
// defaultPodProtections holds the list of protection policies that are enabled by default.
|
||||
// Users can use the 'disabledDefaultPodProtections' evictor arguments (via PodProtections.DefaultDisabled)
|
||||
// to disable any of these default protections.
|
||||
//
|
||||
// The following four policies are included by default:
|
||||
// - PodsWithLocalStorage: Protects pods with local storage.
|
||||
// - DaemonSetPods: Protects DaemonSet managed pods.
|
||||
// - SystemCriticalPods: Protects system-critical pods.
|
||||
// - FailedBarePods: Protects failed bare pods (not part of any controller).
|
||||
var defaultPodProtections = []PodProtection{
|
||||
PodsWithLocalStorage,
|
||||
SystemCriticalPods,
|
||||
FailedBarePods,
|
||||
DaemonSetPods,
|
||||
}
|
||||
|
||||
// extraPodProtections holds a list of protection policies that the user can optionally enable
|
||||
// through the configuration (via PodProtections.ExtraEnabled). These policies are not enabled by default.
|
||||
//
|
||||
// Currently supported extra policies:
|
||||
// - PodsWithPVC: Protects pods using PersistentVolumeClaims.
|
||||
// - PodsWithoutPDB: Protects pods lacking a PodDisruptionBudget.
|
||||
// - PodsWithResourceClaims: Protects pods using ResourceClaims.
|
||||
var extraPodProtections = []PodProtection{
|
||||
PodsWithPVC,
|
||||
PodsWithoutPDB,
|
||||
PodsWithResourceClaims,
|
||||
}
|
||||
|
||||
// NoEvictionPolicy dictates whether a no-eviction policy is preferred or mandatory.
|
||||
// Needs to be used with caution as this gives users the ability to protect their pods
// from eviction, which might work against enforced policies, e.g. plugins evicting pods
// violating security policies.
|
||||
type NoEvictionPolicy string
|
||||
|
||||
const (
|
||||
// PreferredNoEvictionPolicy interprets the no-eviction policy as a preference.
|
||||
// Meaning the annotation will get ignored by the DefaultEvictor plugin.
|
||||
// Yet, plugins may optionally sort their pods based on the annotation
|
||||
// and focus on evicting pods that do not set the annotation.
|
||||
PreferredNoEvictionPolicy NoEvictionPolicy = "Preferred"
|
||||
|
||||
// MandatoryNoEvictionPolicy interprets the no-eviction policy as mandatory.
|
||||
// Every pod carrying the annotation will get excluded from eviction.
|
||||
MandatoryNoEvictionPolicy NoEvictionPolicy = "Mandatory"
|
||||
)
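A minimal sketch of opting into the mandatory interpretation (the annotation key itself is defined in the evictions utils package and is not shown in this diff):

```go
args := &DefaultEvictorArgs{
	// Pods carrying the no-eviction annotation are always filtered out.
	NoEvictionPolicy: MandatoryNoEvictionPolicy,
}
_ = args
```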
|
||||
|
||||
@@ -15,22 +15,86 @@ package defaultevictor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
"slices"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func ValidateDefaultEvictorArgs(obj runtime.Object) error {
|
||||
args := obj.(*DefaultEvictorArgs)
|
||||
|
||||
var allErrs []error
|
||||
if args.PriorityThreshold != nil && args.PriorityThreshold.Value != nil && len(args.PriorityThreshold.Name) > 0 {
|
||||
return fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set, got %v", args)
|
||||
allErrs = append(allErrs, fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set"))
|
||||
}
|
||||
|
||||
if args.MinReplicas == 1 {
|
||||
klog.V(4).Info("DefaultEvictor minReplicas must be greater than 1 to check for min pods during eviction. This check will be ignored during eviction.")
|
||||
}
|
||||
|
||||
return nil
|
||||
if args.NoEvictionPolicy != "" {
|
||||
if args.NoEvictionPolicy != PreferredNoEvictionPolicy && args.NoEvictionPolicy != MandatoryNoEvictionPolicy {
|
||||
allErrs = append(allErrs, fmt.Errorf("noEvictionPolicy accepts only %q values", []NoEvictionPolicy{PreferredNoEvictionPolicy, MandatoryNoEvictionPolicy}))
|
||||
}
|
||||
}
|
||||
|
||||
// check if any deprecated fields are set to true
|
||||
hasDeprecatedFields := args.EvictLocalStoragePods || args.EvictDaemonSetPods ||
|
||||
args.EvictSystemCriticalPods || args.IgnorePvcPods ||
|
||||
args.EvictFailedBarePods || args.IgnorePodsWithoutPDB
|
||||
|
||||
// disallow mixing deprecated fields with PodProtections.ExtraEnabled and PodProtections.DefaultDisabled
|
||||
if hasDeprecatedFields && (len(args.PodProtections.ExtraEnabled) > 0 || len(args.PodProtections.DefaultDisabled) > 0) {
|
||||
allErrs = append(allErrs, fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"))
|
||||
}
|
||||
|
||||
if len(args.PodProtections.ExtraEnabled) > 0 || len(args.PodProtections.DefaultDisabled) > 0 {
|
||||
|
||||
for _, policy := range args.PodProtections.ExtraEnabled {
|
||||
if !slices.Contains(extraPodProtections, policy) {
|
||||
allErrs = append(allErrs, fmt.Errorf("invalid pod protection policy in ExtraEnabled: %q. Valid options are: %v",
|
||||
string(policy), extraPodProtections))
|
||||
}
|
||||
}
|
||||
|
||||
for _, policy := range args.PodProtections.DefaultDisabled {
|
||||
if !slices.Contains(defaultPodProtections, policy) {
|
||||
allErrs = append(allErrs, fmt.Errorf("invalid pod protection policy in DefaultDisabled: %q. Valid options are: %v",
|
||||
string(policy), defaultPodProtections))
|
||||
}
|
||||
}
|
||||
|
||||
if hasDuplicates(args.PodProtections.DefaultDisabled) {
|
||||
allErrs = append(allErrs, fmt.Errorf("PodProtections.DefaultDisabled contains duplicate entries"))
|
||||
}
|
||||
|
||||
if hasDuplicates(args.PodProtections.ExtraEnabled) {
|
||||
allErrs = append(allErrs, fmt.Errorf("PodProtections.ExtraEnabled contains duplicate entries"))
|
||||
}
|
||||
|
||||
if slices.Contains(args.PodProtections.ExtraEnabled, PodsWithPVC) {
|
||||
if args.PodProtections.Config != nil && args.PodProtections.Config.PodsWithPVC != nil {
|
||||
protectedsc := args.PodProtections.Config.PodsWithPVC.ProtectedStorageClasses
|
||||
for i, sc := range protectedsc {
|
||||
if sc.Name == "" {
|
||||
allErrs = append(allErrs, fmt.Errorf("PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[%d] name cannot be empty", i))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return utilerrors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
func hasDuplicates(slice []PodProtection) bool {
|
||||
seen := make(map[PodProtection]struct{}, len(slice))
|
||||
for _, item := range slice {
|
||||
if _, exists := seen[item]; exists {
|
||||
return true
|
||||
}
|
||||
seen[item] = struct{}{}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
242 pkg/framework/plugins/defaultevictor/validation_test.go Normal file
@@ -0,0 +1,242 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaultevictor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func TestValidateDefaultEvictorArgs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args *DefaultEvictorArgs
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing invalid priority",
|
||||
args: &DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: utilptr.To[int32](1),
|
||||
Name: "priority-name",
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set"),
|
||||
},
|
||||
{
|
||||
name: "passing invalid no eviction policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
NoEvictionPolicy: "invalid-no-eviction-policy",
|
||||
},
|
||||
errInfo: fmt.Errorf("noEvictionPolicy accepts only %q values", []NoEvictionPolicy{PreferredNoEvictionPolicy, MandatoryNoEvictionPolicy}),
|
||||
},
|
||||
{
|
||||
name: "Valid configuration with no deprecated fields",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{},
|
||||
ExtraEnabled: []PodProtection{},
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Valid configuration: both Disabled and ExtraEnabled",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{
|
||||
DaemonSetPods,
|
||||
PodsWithLocalStorage,
|
||||
},
|
||||
ExtraEnabled: []PodProtection{
|
||||
PodsWithPVC,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Valid configuration with ExtraEnabled",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{
|
||||
PodsWithPVC,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Invalid configuration: Deprecated field used with Disabled",
|
||||
args: &DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: true,
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{
|
||||
DaemonSetPods,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
|
||||
},
|
||||
{
|
||||
name: "Invalid configuration: Deprecated field used with ExtraPodProtections",
|
||||
args: &DefaultEvictorArgs{
|
||||
EvictDaemonSetPods: true,
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{
|
||||
PodsWithPVC,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
|
||||
},
|
||||
{
|
||||
name: "MinReplicas warning logged but no error",
|
||||
args: &DefaultEvictorArgs{
|
||||
MinReplicas: 1,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled: Unknown policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{"InvalidPolicy"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "InvalidPolicy". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled: Misspelled policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{"PodsWithPVCC"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "PodsWithPVCC". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled: Policy from DefaultDisabled list",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{DaemonSetPods},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "DaemonSetPods". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled: Unknown policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{"InvalidPolicy"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "InvalidPolicy". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled: Misspelled policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{"PodsWithLocalStorag"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "PodsWithLocalStorag". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled: Policy from ExtraEnabled list",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{PodsWithPVC},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "PodsWithPVC". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled duplicate",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithPVC},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`PodProtections.ExtraEnabled contains duplicate entries`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled duplicate",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{PodsWithLocalStorage, PodsWithLocalStorage},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`PodProtections.DefaultDisabled contains duplicate entries`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled duplicate and Invalid ExtraEnabled duplicate and passing invalid no eviction policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
NoEvictionPolicy: "invalid-no-eviction-policy",
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithPVC},
|
||||
DefaultDisabled: []PodProtection{PodsWithLocalStorage, PodsWithLocalStorage, PodsWithoutPDB},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`[noEvictionPolicy accepts only ["Preferred" "Mandatory"] values, invalid pod protection policy in DefaultDisabled: "PodsWithoutPDB". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods], PodProtections.DefaultDisabled contains duplicate entries, PodProtections.ExtraEnabled contains duplicate entries]`),
|
||||
},
|
||||
{
|
||||
name: "Protected storage classes without storage class name",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC},
|
||||
Config: &PodProtectionsConfig{
|
||||
PodsWithPVC: &PodsWithPVCConfig{
|
||||
ProtectedStorageClasses: []ProtectedStorageClass{
|
||||
{
|
||||
Name: "",
|
||||
},
|
||||
{
|
||||
Name: "protected-storage-class-0",
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
},
|
||||
{
|
||||
Name: "protected-storage-class-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`[PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[0] name cannot be empty, PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[2] name cannot be empty]`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
validateErr := ValidateDefaultEvictorArgs(runtime.Object(testCase.args))
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -46,6 +46,7 @@ func (in *DefaultEvictorArgs) DeepCopyInto(out *DefaultEvictorArgs) {
|
||||
*out = new(v1.Duration)
|
||||
**out = **in
|
||||
}
|
||||
in.PodProtections.DeepCopyInto(&out.PodProtections)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -66,3 +67,76 @@ func (in *DefaultEvictorArgs) DeepCopyObject() runtime.Object {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodProtections) DeepCopyInto(out *PodProtections) {
|
||||
*out = *in
|
||||
if in.ExtraEnabled != nil {
|
||||
in, out := &in.ExtraEnabled, &out.ExtraEnabled
|
||||
*out = make([]PodProtection, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.DefaultDisabled != nil {
|
||||
in, out := &in.DefaultDisabled, &out.DefaultDisabled
|
||||
*out = make([]PodProtection, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Config != nil {
|
||||
in, out := &in.Config, &out.Config
|
||||
*out = new(PodProtectionsConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProtections.
|
||||
func (in *PodProtections) DeepCopy() *PodProtections {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodProtections)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodProtectionsConfig) DeepCopyInto(out *PodProtectionsConfig) {
|
||||
*out = *in
|
||||
if in.PodsWithPVC != nil {
|
||||
in, out := &in.PodsWithPVC, &out.PodsWithPVC
|
||||
*out = new(PodsWithPVCConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProtectionsConfig.
|
||||
func (in *PodProtectionsConfig) DeepCopy() *PodProtectionsConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodProtectionsConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodsWithPVCConfig) DeepCopyInto(out *PodsWithPVCConfig) {
|
||||
*out = *in
|
||||
if in.ProtectedStorageClasses != nil {
|
||||
in, out := &in.ProtectedStorageClasses, &out.ProtectedStorageClasses
|
||||
*out = make([]ProtectedStorageClass, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsWithPVCConfig.
|
||||
func (in *PodsWithPVCConfig) DeepCopy() *PodsWithPVCConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodsWithPVCConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
90 pkg/framework/plugins/example/README.md Normal file
@@ -0,0 +1,90 @@
|
||||
# Descheduler Plugin: Example Implementation
|
||||
|
||||
This directory provides an example plugin for the Kubernetes Descheduler,
|
||||
demonstrating how to evict pods based on custom criteria. The plugin targets
|
||||
pods based on:
|
||||
|
||||
* **Name Regex:** Pods matching a specified regular expression.
|
||||
* **Age:** Pods older than a defined duration.
|
||||
* **Namespace:** Pods within or outside a given list of namespaces (inclusion
|
||||
or exclusion).
|
||||
|
||||
## Building and Integrating the Plugin
|
||||
|
||||
To incorporate this plugin into your Descheduler build, you must register it
|
||||
within the Descheduler's plugin registry. Follow these steps:
|
||||
|
||||
1. **Register the Plugin:**
|
||||
* Modify the `pkg/descheduler/setupplugins.go` file.
|
||||
* Add the following registration line to the end of the
|
||||
`RegisterDefaultPlugins()` function:
|
||||
|
||||
```go
|
||||
pluginregistry.Register(
|
||||
example.PluginName,
|
||||
example.New,
|
||||
&example.Example{},
|
||||
&example.ExampleArgs{},
|
||||
example.ValidateExampleArgs,
|
||||
example.SetDefaults_Example,
|
||||
registry,
|
||||
)
|
||||
```
|
||||
|
||||
2. **Generate Code:**
|
||||
* If you modify the plugin's code, execute `make gen` before rebuilding the
|
||||
Descheduler. This ensures generated code is up-to-date.
|
||||
|
||||
3. **Rebuild the Descheduler:**
|
||||
* Build the descheduler with your changes.
|
||||
|
||||
## Plugin Configuration
|
||||
|
||||
Configure the plugin's behavior using the Descheduler's policy configuration.
|
||||
Here's an example:
|
||||
|
||||
```yaml
|
||||
apiVersion: descheduler/v1alpha2
|
||||
kind: DeschedulerPolicy
|
||||
profiles:
|
||||
- name: LifecycleAndUtilization
|
||||
plugins:
|
||||
deschedule:
|
||||
enabled:
|
||||
- Example
|
||||
pluginConfig:
|
||||
- name: Example
|
||||
args:
|
||||
regex: ^descheduler-test.*$
|
||||
maxAge: 3m
|
||||
namespaces:
|
||||
include:
|
||||
- default
|
||||
```
|
||||
|
||||
## Explanation
|
||||
|
||||
- `regex: ^descheduler-test.*$`: Evicts pods whose names match the regular
|
||||
expression `^descheduler-test.*$`.
|
||||
- `maxAge: 3m`: Evicts pods older than 3 minutes.
|
||||
- `namespaces.include: - default`: Evicts pods within the default namespace.
|
||||
|
||||
This configuration will cause the plugin to evict pods that meet all three
|
||||
criteria: matching the `regex`, exceeding the `maxAge`, and residing in the
|
||||
specified namespace.
|
||||
|
||||
## Notes
|
||||
|
||||
- This plugin is configured through the `ExampleArgs` struct, which defines the
|
||||
plugin's parameters.
|
||||
- Plugins must implement a function to validate and another to set the default
values for their `Args` struct (a minimal validation sketch follows after this list).
|
||||
- The fields in the `ExampleArgs` struct reflect directly into the
|
||||
`DeschedulerPolicy` configuration.
|
||||
- Plugins must comply with the `DeschedulePlugin` interface to be registered
|
||||
with the Descheduler.
|
||||
- The main functionality of the plugin is implemented in the `Deschedule()`
|
||||
method, which is called by the Descheduler when the plugin is executed.
|
||||
- A good amount of descheduling logic can be achieved by means of filters.
|
||||
- Whenever a change in the Plugin's configuration is made the developer should
|
||||
regenerate the code by running `make gen`.
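
A minimal sketch of what such a validation hook could look like for this plugin, assuming `ExampleArgs` keeps `Regex` and `MaxAge` as plain strings and that `fmt`, `regexp`, `time`, and `k8s.io/apimachinery/pkg/runtime` are imported; the plugin's actual `ValidateExampleArgs` may differ:

```go
// ValidateExampleArgs is a hypothetical validation sketch.
func ValidateExampleArgs(obj runtime.Object) error {
	args, ok := obj.(*ExampleArgs)
	if !ok {
		return fmt.Errorf("args must be of type ExampleArgs, got %T", obj)
	}
	if _, err := regexp.Compile(args.Regex); err != nil {
		return fmt.Errorf("invalid regex %q: %w", args.Regex, err)
	}
	if _, err := time.ParseDuration(args.MaxAge); err != nil {
		return fmt.Errorf("invalid maxAge %q: %w", args.MaxAge, err)
	}
	return nil
}
```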
|
||||
36 pkg/framework/plugins/example/defaults.go Normal file
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||
return RegisterDefaults(scheme)
|
||||
}
|
||||
|
||||
// SetDefaults_Example sets the default arguments for the Example plugin. In
|
||||
// this case we set the default regex to match only empty strings (this should
|
||||
// not ever match anything). The default maximum age for pods is set to 5
|
||||
// minutes.
|
||||
func SetDefaults_Example(obj runtime.Object) {
|
||||
args := obj.(*ExampleArgs)
|
||||
if args.Regex == "" {
|
||||
args.Regex = "^$"
|
||||
}
|
||||
if args.MaxAge == "" {
|
||||
args.MaxAge = "5m"
|
||||
}
|
||||
}
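For illustration, a short sketch of what the defaulting above does to an otherwise empty arguments object:

```go
args := &ExampleArgs{}
SetDefaults_Example(args)
// args.Regex is now "^$" (matches only empty names, i.e. effectively nothing)
// and args.MaxAge is "5m".
_ = args
```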
|
||||
@@ -1,12 +1,9 @@
|
||||
/*
|
||||
Copyright 2014 Google Inc. All rights reserved.
|
||||
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@@ -14,5 +11,6 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package fuzz is a library for populating go objects with random values.
|
||||
package fuzz
|
||||
// +k8s:defaulter-gen=TypeMeta
|
||||
|
||||
package example
|
||||
186 pkg/framework/plugins/example/example.go Normal file
@@ -0,0 +1,186 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
fwtypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
)
|
||||
|
||||
// PluginName is used when registering the plugin. You need to choose a unique
|
||||
// name across all plugins. This name is used to identify the plugin config in
|
||||
// the descheduler policy.
|
||||
const PluginName = "Example"
|
||||
|
||||
// We need to ensure that the plugin struct complies with the DeschedulePlugin
|
||||
// interface. This prevents unexpected changes that may render this type
|
||||
// incompatible.
|
||||
var _ fwtypes.DeschedulePlugin = &Example{}
|
||||
|
||||
// Example is our plugin (implementing the DeschedulePlugin interface). This
|
||||
// plugin will evict pods that match a regex and are older than a certain age.
|
||||
type Example struct {
|
||||
logger klog.Logger
|
||||
handle fwtypes.Handle
|
||||
args *ExampleArgs
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
// New builds a plugin instance from its arguments. Arguments are passed in as
|
||||
// a runtime.Object. Handle is used by plugins to retrieve a kubernetes client
|
||||
// set, evictor interface, shared informer factory and other instruments shared
|
||||
// across different plugins.
|
||||
func New(ctx context.Context, args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
|
||||
// make sure we are receiving the right argument type.
|
||||
exampleArgs, ok := args.(*ExampleArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("args must be of type ExampleArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
// we can use the included and excluded namespaces to filter the pods we want
|
||||
// to evict.
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if exampleArgs.Namespaces != nil {
|
||||
includedNamespaces = sets.New(exampleArgs.Namespaces.Include...)
|
||||
excludedNamespaces = sets.New(exampleArgs.Namespaces.Exclude...)
|
||||
}
|
||||
|
||||
// here we create a pod filter that will return only pods that can be
|
||||
// evicted (according to the evictor and inside the namespaces we want).
|
||||
// NOTE: here we could also add a function to filter out by the regex and
|
||||
// age but for sake of the example we are keeping it simple and filtering
|
||||
// those out in the Deschedule() function.
|
||||
podFilter, err := podutil.NewOptions().
|
||||
WithNamespaces(includedNamespaces).
|
||||
WithoutNamespaces(excludedNamespaces).
|
||||
WithFilter(
|
||||
podutil.WrapFilterFuncs(
|
||||
handle.Evictor().Filter,
|
||||
handle.Evictor().PreEvictionFilter,
|
||||
),
|
||||
).
|
||||
BuildFilterFunc()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
|
||||
}
|
||||
|
||||
return &Example{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
podFilter: podFilter,
|
||||
args: exampleArgs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name returns the plugin name.
|
||||
func (d *Example) Name() string {
|
||||
return PluginName
|
||||
}
|
||||
|
||||
// Deschedule is the function where most of the logic around eviction is laid
|
||||
// down. Here we go through all pods in all nodes and evict the ones that match
|
||||
// the regex and are older than the maximum age. This function receives a list
|
||||
// of nodes we need to process.
|
||||
func (d *Example) Deschedule(ctx context.Context, nodes []*v1.Node) *fwtypes.Status {
|
||||
var podsToEvict []*v1.Pod
|
||||
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", fwtypes.DescheduleExtensionPoint)
|
||||
logger.Info("Example plugin starting descheduling")
|
||||
|
||||
re, err := regexp.Compile(d.args.Regex)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("fail to compile regex: %w", err)
|
||||
return &fwtypes.Status{Err: err}
|
||||
}
|
||||
|
||||
duration, err := time.ParseDuration(d.args.MaxAge)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("fail to parse max age: %w", err)
|
||||
return &fwtypes.Status{Err: err}
|
||||
}
|
||||
|
||||
// here we create an auxiliary filter to remove all pods that don't
|
||||
// match the provided regex or are still too young to be evicted.
|
||||
// This filter will be used when we list all pods on a node. This
|
||||
// filter here could have been part of the podFilter but we are
|
||||
// keeping it separate for the sake of the example.
|
||||
filter := func(pod *v1.Pod) bool {
|
||||
if !re.MatchString(pod.Name) {
|
||||
return false
|
||||
}
|
||||
deadline := pod.CreationTimestamp.Add(duration)
|
||||
return time.Now().After(deadline)
|
||||
}
|
||||
|
||||
// go node by node getting all pods that we can evict.
|
||||
for _, node := range nodes {
|
||||
// ListPodsOnANode is a helper function that retrieves all pods (excluding those in the Succeeded or Failed phase), filtering out the ones we can't evict.
|
||||
// We merge the default filters with the one we created above.
|
||||
//
|
||||
// The difference between ListPodsOnANode and ListAllPodsOnANode lies in their handling of Pods based on their phase:
|
||||
// - ListPodsOnANode excludes Pods that are in Succeeded or Failed phases because they do not occupy any resources.
|
||||
// - ListAllPodsOnANode does not exclude Pods based on their phase, listing all Pods regardless of their state.
|
||||
//
|
||||
// In this context, we prefer using ListPodsOnANode because:
|
||||
// 1. It ensures that only active Pods (not in Succeeded or Failed states) are considered for eviction.
|
||||
// 2. This helps avoid unnecessary processing of Pods that no longer consume resources.
|
||||
// 3. By applying an additional filter (d.podFilter and filter), we can further refine which Pods are eligible for eviction,
|
||||
// ensuring that only Pods meeting specific criteria are selected.
|
||||
//
|
||||
// However, if you need to consider all Pods including those in Succeeded or Failed states for other purposes,
|
||||
// you should use ListAllPodsOnANode instead.
|
||||
pods, err := podutil.ListPodsOnANode(
|
||||
node.Name,
|
||||
d.handle.GetPodsAssignedToNodeFunc(),
|
||||
podutil.WrapFilterFuncs(d.podFilter, filter),
|
||||
)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("fail to list pods: %w", err)
|
||||
return &fwtypes.Status{Err: err}
|
||||
}
|
||||
|
||||
// as we have already filtered out pods that don't match the
|
||||
// regex or are too young we can simply add them all to the
|
||||
// eviction list.
|
||||
podsToEvict = append(podsToEvict, pods...)
|
||||
}
|
||||
|
||||
// evict all the pods.
|
||||
for _, pod := range podsToEvict {
|
||||
logger.Info("Example plugin evicting pod", "pod", klog.KObj(pod))
|
||||
opts := evictions.EvictOptions{StrategyName: PluginName}
|
||||
if err := d.handle.Evictor().Evict(ctx, pod, opts); err != nil {
|
||||
logger.Error(err, "unable to evict pod", "pod", klog.KObj(pod))
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("Example plugin finished descheduling")
|
||||
return nil
|
||||
}
|
||||
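To make the age check in Deschedule concrete, here is a small standalone sketch (not part of the patch; it only uses the standard library): with MaxAge set to "24h", a pod created 25 hours ago is past its deadline and is therefore eligible for eviction.

package main

import (
	"fmt"
	"time"
)

func main() {
	maxAge, _ := time.ParseDuration("24h")     // MaxAge from the plugin arguments
	created := time.Now().Add(-25 * time.Hour) // stands in for pod.CreationTimestamp
	deadline := created.Add(maxAge)            // same computation as the filter above
	fmt.Println(time.Now().After(deadline))    // true: old enough to be evicted
}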
31
pkg/framework/plugins/example/register.go
Normal file
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
var (
|
||||
SchemeBuilder = runtime.NewSchemeBuilder()
|
||||
localSchemeBuilder = &SchemeBuilder
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
localSchemeBuilder.Register(addDefaultingFuncs)
|
||||
}
|
||||
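register.go above wires addDefaultingFuncs into the scheme builder, but that function lives in a defaults.go file that is not part of this excerpt. In descheduler plugins it is typically just a thin wrapper around the generated RegisterDefaults shown further below; a minimal sketch (assumed, not taken from this patch) would be:

package example

import "k8s.io/apimachinery/pkg/runtime"

func addDefaultingFuncs(scheme *runtime.Scheme) error {
	// delegate to the defaulter-gen generated registration
	return RegisterDefaults(scheme)
}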
45
pkg/framework/plugins/example/types.go
Normal file
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ExampleArgs holds a list of arguments used to configure the plugin. For this
|
||||
// simple example we only care about a regex, a maximum age and possibly a list
|
||||
// of namespaces to which we want to apply the descheduler. This plugin evicts
|
||||
// pods that match a given regular expression and are older than the maximum
|
||||
// allowed age. Most of the fields here were defined as strings so we can
|
||||
// validate them somewhere else (and show you a better implementation example).
|
||||
type ExampleArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Regex is a regular expression we use to match against pod names. If
|
||||
// the pod name matches the regex it will be evicted. This is expected
|
||||
// to be a valid regular expression (according to go's regexp package).
|
||||
Regex string `json:"regex"`
|
||||
|
||||
// MaxAge is the maximum age a pod can have before it is considered for
|
||||
// eviction. This is expected to be a valid time.Duration.
|
||||
MaxAge string `json:"maxAge"`
|
||||
|
||||
// Namespaces allows us to filter on which namespaces we want to apply
|
||||
// the descheduler.
|
||||
Namespaces *api.Namespaces `json:"namespaces,omitempty"`
|
||||
}
|
||||
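For illustration only (the values are invented and assume the imports of this package), the arguments above could be populated like this; Regex must compile with regexp.Compile and MaxAge must parse with time.ParseDuration, which is exactly what the validation file below enforces:

args := &ExampleArgs{
	Regex:  "^sandbox-.*", // evict pods whose name matches this pattern
	MaxAge: "48h",         // but only once they are older than 48 hours
	Namespaces: &api.Namespaces{
		Include: []string{"dev"}, // restrict the plugin to the dev namespace
	},
}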
45
pkg/framework/plugins/example/validation.go
Normal file
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// ValidateExampleArgs validates if the plugin arguments are correct (we have
|
||||
// everything we need). In this case we only validate that we have a valid
|
||||
// regular expression and maximum age.
|
||||
func ValidateExampleArgs(obj runtime.Object) error {
|
||||
args := obj.(*ExampleArgs)
|
||||
if args.Regex == "" {
|
||||
return fmt.Errorf("regex argument must be set")
|
||||
}
|
||||
|
||||
if _, err := regexp.Compile(args.Regex); err != nil {
|
||||
return fmt.Errorf("invalid regex: %v", err)
|
||||
}
|
||||
|
||||
if _, err := time.ParseDuration(args.MaxAge); err != nil {
|
||||
return fmt.Errorf("invalid max age: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
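A quick sketch of the validation in use (invented values; assumes fmt is imported): an unterminated character class fails the regexp.Compile check and the error is wrapped with the "invalid regex" prefix.

err := ValidateExampleArgs(&ExampleArgs{Regex: "[", MaxAge: "24h"})
fmt.Println(err) // invalid regex: error parsing regexp: ...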
57
pkg/framework/plugins/example/zz_generated.deepcopy.go
generated
Normal file
@@ -0,0 +1,57 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
api "sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExampleArgs) DeepCopyInto(out *ExampleArgs) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Namespaces != nil {
|
||||
in, out := &in.Namespaces, &out.Namespaces
|
||||
*out = new(api.Namespaces)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExampleArgs.
|
||||
func (in *ExampleArgs) DeepCopy() *ExampleArgs {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExampleArgs)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ExampleArgs) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
33
pkg/framework/plugins/example/zz_generated.defaults.go
generated
Normal file
@@ -0,0 +1,33 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by defaulter-gen. DO NOT EDIT.
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// RegisterDefaults adds defaulters functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
// All generated defaulters are covering - they call all nested defaulters.
|
||||
func RegisterDefaults(scheme *runtime.Scheme) error {
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package classifier
|
||||
|
||||
// Classifier is a function that classifies a resource usage based on a limit.
|
||||
// The function should return true if the resource usage matches the classifier
|
||||
// intent.
|
||||
type Classifier[K comparable, V any] func(K, V, V) bool
|
||||
|
||||
// Comparer is a function that compares two objects. This function should return
|
||||
// -1 if the first object is less than the second, 0 if they are equal, and 1 if
|
||||
// the first object is greater than the second. Of course this is a simplification
|
||||
// and any negative, zero, or positive value can be returned; only the sign matters.
|
||||
type Comparer[V any] func(V, V) int
|
||||
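// For instance (illustrative only), a Comparer for plain integers could be
//
//	func(a, b int) int { return a - b }
//
// while resource quantities can simply delegate to their Cmp method, as the
// tests further below do.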
|
||||
// Values is a map of values indexed by a comparable key. An example of this
|
||||
// can be a list of resources indexed by a node name.
|
||||
type Values[K comparable, V any] map[K]V
|
||||
|
||||
// Limits is a map of lists of limits indexed by a comparable key. Each limit
|
||||
// inside the list requires a classifier to evaluate.
|
||||
type Limits[K comparable, V any] map[K][]V
|
||||
|
||||
// Classify is a function that classifies based on classifier functions. This
|
||||
// function receives Values, a list of n Limits (indexed by name), and a list
|
||||
// of n Classifiers. The classifier at n position is called to evaluate the
|
||||
// limit at n position. The first classifier to return true will receive the
|
||||
// value, at this point the loop will break and the next value will be
|
||||
// evaluated. This function returns a slice of maps, each position in the
|
||||
// returned slice corresponds to one of the classifiers (e.g. if n limits
|
||||
// and classifiers are provided, the returned slice will have n maps).
|
||||
func Classify[K comparable, V any](
|
||||
values Values[K, V], limits Limits[K, V], classifiers ...Classifier[K, V],
|
||||
) []map[K]V {
|
||||
result := make([]map[K]V, len(classifiers))
|
||||
for i := range classifiers {
|
||||
result[i] = make(map[K]V)
|
||||
}
|
||||
|
||||
for index, usage := range values {
|
||||
for i, limit := range limits[index] {
|
||||
if len(classifiers) <= i {
|
||||
continue
|
||||
}
|
||||
if !classifiers[i](index, usage, limit) {
|
||||
continue
|
||||
}
|
||||
result[i][index] = usage
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
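// A minimal usage sketch (hypothetical, mirroring the tests that follow; it
// assumes fmt is imported): two classifiers split the usage map into an
// "under" bucket and an "over" bucket.
//
//	func ExampleClassify() {
//		usage := Values[string, int]{"node1": 2, "node2": 8}
//		limits := Limits[string, int]{"node1": {4, 6}, "node2": {4, 6}}
//		groups := Classify(usage, limits,
//			func(_ string, u, l int) bool { return u < l }, // below the first limit
//			func(_ string, u, l int) bool { return u > l }, // above the second limit
//		)
//		fmt.Println(groups[0], groups[1])
//		// Output: map[node1:2] map[node2:8]
//	}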
|
||||
// ForMap is a function that returns a classifier that compares all values in a
|
||||
// map. The function receives a Comparer function that is used to compare all
|
||||
// the map values. The returned Classifier will return true only if the
|
||||
// provided Comparer function returns a value less than 0 for all the values.
|
||||
func ForMap[K, I comparable, V any, M ~map[I]V](cmp Comparer[V]) Classifier[K, M] {
|
||||
return func(_ K, usages, limits M) bool {
|
||||
for idx, usage := range usages {
|
||||
if limit, ok := limits[idx]; ok {
|
||||
if cmp(usage, limit) >= 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
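To round the classifier package off, a sketch of ForMap with Kubernetes resource lists (hypothetical; it relies on the k8s.io/api/core/v1 and k8s.io/apimachinery/pkg/api/resource packages that the tests below import): the returned classifier reports true only when every resource stays strictly below its limit.

under := ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
	func(usage, limit resource.Quantity) int { return usage.Cmp(limit) },
)
ok := under("node1",
	v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}, // usage
	v1.ResourceList{v1.ResourceCPU: resource.MustParse("4")}, // limit
)
// ok == true: CPU usage (2) is below its limit (4); any resource at or above
// its limit would make the classifier return false.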
@@ -0,0 +1,739 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package classifier
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
func TestClassifySimple(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usage map[string]int
|
||||
limits map[string][]int
|
||||
classifiers []Classifier[string, int]
|
||||
expected []map[string]int
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
usage: map[string]int{},
|
||||
limits: map[string][]int{},
|
||||
expected: []map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "one under one over",
|
||||
usage: map[string]int{
|
||||
"node1": 2,
|
||||
"node2": 8,
|
||||
},
|
||||
limits: map[string][]int{
|
||||
"node1": {4, 6},
|
||||
"node2": {4, 6},
|
||||
},
|
||||
expected: []map[string]int{
|
||||
{"node1": 2},
|
||||
{"node2": 8},
|
||||
},
|
||||
classifiers: []Classifier[string, int]{
|
||||
func(_ string, usage, limit int) bool {
|
||||
return usage < limit
|
||||
},
|
||||
func(_ string, usage, limit int) bool {
|
||||
return usage > limit
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "randomly positioned over utilized",
|
||||
usage: map[string]int{
|
||||
"node1": 2,
|
||||
"node2": 8,
|
||||
"node3": 2,
|
||||
"node4": 8,
|
||||
"node5": 8,
|
||||
"node6": 2,
|
||||
"node7": 2,
|
||||
"node8": 8,
|
||||
"node9": 8,
|
||||
},
|
||||
limits: map[string][]int{
|
||||
"node1": {4, 6},
|
||||
"node2": {4, 6},
|
||||
"node3": {4, 6},
|
||||
"node4": {4, 6},
|
||||
"node5": {4, 6},
|
||||
"node6": {4, 6},
|
||||
"node7": {4, 6},
|
||||
"node8": {4, 6},
|
||||
"node9": {4, 6},
|
||||
},
|
||||
expected: []map[string]int{
|
||||
{
|
||||
"node1": 2,
|
||||
"node3": 2,
|
||||
"node6": 2,
|
||||
"node7": 2,
|
||||
},
|
||||
{
|
||||
"node2": 8,
|
||||
"node4": 8,
|
||||
"node5": 8,
|
||||
"node8": 8,
|
||||
"node9": 8,
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, int]{
|
||||
func(_ string, usage, limit int) bool {
|
||||
return usage < limit
|
||||
},
|
||||
func(_ string, usage, limit int) bool {
|
||||
return usage > limit
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := Classify(tt.usage, tt.limits, tt.classifiers...)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClassify_pointers(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usage map[string]map[v1.ResourceName]*resource.Quantity
|
||||
limits map[string][]map[v1.ResourceName]*resource.Quantity
|
||||
classifiers []Classifier[string, map[v1.ResourceName]*resource.Quantity]
|
||||
expected []map[string]map[v1.ResourceName]*resource.Quantity
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
usage: map[string]map[v1.ResourceName]*resource.Quantity{},
|
||||
limits: map[string][]map[v1.ResourceName]*resource.Quantity{},
|
||||
expected: []map[string]map[v1.ResourceName]*resource.Quantity{},
|
||||
},
|
||||
{
|
||||
name: "single underutilized",
|
||||
usage: map[string]map[v1.ResourceName]*resource.Quantity{
|
||||
"node1": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
|
||||
},
|
||||
},
|
||||
limits: map[string][]map[v1.ResourceName]*resource.Quantity{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]map[v1.ResourceName]*resource.Quantity{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, map[v1.ResourceName]*resource.Quantity]{
|
||||
ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
|
||||
func(usage, limit *resource.Quantity) int {
|
||||
return usage.Cmp(*limit)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single underutilized and properly utilized",
|
||||
usage: map[string]map[v1.ResourceName]*resource.Quantity{
|
||||
"node1": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("5")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("5Gi")),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("8")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("8Gi")),
|
||||
},
|
||||
},
|
||||
limits: map[string][]map[v1.ResourceName]*resource.Quantity{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("16")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("16")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
|
||||
},
|
||||
},
|
||||
"node3": {
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("16")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]map[v1.ResourceName]*resource.Quantity{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
|
||||
},
|
||||
},
|
||||
{},
|
||||
},
|
||||
classifiers: []Classifier[string, map[v1.ResourceName]*resource.Quantity]{
|
||||
ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
|
||||
func(usage, limit *resource.Quantity) int {
|
||||
return usage.Cmp(*limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
|
||||
func(usage, limit *resource.Quantity) int {
|
||||
return limit.Cmp(*usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := Classify(tt.usage, tt.limits, tt.classifiers...)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClassify(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usage map[string]v1.ResourceList
|
||||
limits map[string][]v1.ResourceList
|
||||
classifiers []Classifier[string, v1.ResourceList]
|
||||
expected []map[string]v1.ResourceList
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
usage: map[string]v1.ResourceList{},
|
||||
limits: map[string][]v1.ResourceList{},
|
||||
expected: []map[string]v1.ResourceList{},
|
||||
},
|
||||
{
|
||||
name: "single underutilized",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "less classifiers than limits",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("5"),
|
||||
v1.ResourceMemory: resource.MustParse("5Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
"node3": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "more classifiers than limits",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("20"),
|
||||
v1.ResourceMemory: resource.MustParse("20"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("50"),
|
||||
v1.ResourceMemory: resource.MustParse("50"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("80"),
|
||||
v1.ResourceMemory: resource.MustParse("80"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("30"),
|
||||
v1.ResourceMemory: resource.MustParse("30"),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("30"),
|
||||
v1.ResourceMemory: resource.MustParse("30"),
|
||||
},
|
||||
},
|
||||
"node3": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("30"),
|
||||
v1.ResourceMemory: resource.MustParse("30"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("20"),
|
||||
v1.ResourceMemory: resource.MustParse("20"),
|
||||
},
|
||||
},
|
||||
{},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single underutilized and properly utilized",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("5"),
|
||||
v1.ResourceMemory: resource.MustParse("5Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
"node3": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
{},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single underutilized and multiple over utilized nodes",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
"node3": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
{
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "over and under at the same time",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("1"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("1"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{},
|
||||
{},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only memory over utilized",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("5"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{},
|
||||
{},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "randomly positioned over utilized",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node2": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
"node3": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node4": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
"node5": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node6": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node7": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node8": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
"node9": {v1.ResourceCPU: resource.MustParse("5")},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node2": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node3": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node4": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node5": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node6": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node7": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node8": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node9": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node2": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
"node4": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
"node8": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
},
|
||||
{
|
||||
"node1": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node3": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node5": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node6": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node7": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := Classify(tt.usage, tt.limits, tt.classifiers...)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -19,9 +19,9 @@ package nodeutilization
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -29,150 +29,247 @@ import (
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const HighNodeUtilizationPluginName = "HighNodeUtilization"
|
||||
|
||||
// HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its plugin.
|
||||
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
|
||||
|
||||
type HighNodeUtilization struct {
|
||||
handle frameworktypes.Handle
|
||||
args *HighNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
}
|
||||
|
||||
// this line makes sure that HighNodeUtilization implements the BalancePlugin
|
||||
// interface.
|
||||
var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
|
||||
|
||||
// NewHighNodeUtilization builds plugin from its arguments while passing a handle
|
||||
func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
highNodeUtilizatioArgs, ok := args.(*HighNodeUtilizationArgs)
|
||||
// HighNodeUtilization evicts pods from under utilized nodes so that scheduler
|
||||
// can schedule according to its plugin. Note that CPU/Memory requests are used
|
||||
// to calculate nodes' utilization and not the actual resource usage.
|
||||
type HighNodeUtilization struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *HighNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
criteria []any
|
||||
resourceNames []v1.ResourceName
|
||||
highThresholds api.ResourceThresholds
|
||||
usageClient usageClient
|
||||
}
|
||||
|
||||
// NewHighNodeUtilization builds plugin from its arguments while passing a handle.
|
||||
func NewHighNodeUtilization(
|
||||
ctx context.Context, genericArgs runtime.Object, handle frameworktypes.Handle,
|
||||
) (frameworktypes.Plugin, error) {
|
||||
args, ok := genericArgs.(*HighNodeUtilizationArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
|
||||
return nil, fmt.Errorf(
|
||||
"want args to be of type HighNodeUtilizationArgs, got %T",
|
||||
genericArgs,
|
||||
)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", HighNodeUtilizationPluginName)
|
||||
|
||||
// this plugin worries only about thresholds but the nodeplugins
|
||||
// package was made to take two thresholds into account, one for low
|
||||
// and another for high usage. here we make sure we set the high
|
||||
// threshold to the maximum value for all resources for which we have a
|
||||
// threshold.
|
||||
highThresholds := make(api.ResourceThresholds)
|
||||
for rname := range args.Thresholds {
|
||||
highThresholds[rname] = MaxResourcePercentage
|
||||
}
|
||||
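// For example (illustrative values only): with args.Thresholds set to
// {cpu: 20, memory: 20}, the loop above yields
// highThresholds = {cpu: MaxResourcePercentage, memory: MaxResourcePercentage},
// effectively disabling the upper bound so that only the low thresholds drive
// the classification in this plugin.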
|
||||
podFilter, err := podutil.NewOptions().
|
||||
WithFilter(handle.Evictor().Filter).
|
||||
// get the resource names for which we have a threshold. this is
|
||||
// later used when determining if we are going to evict a pod.
|
||||
resourceThresholds := getResourceNames(args.Thresholds)
|
||||
|
||||
// by default we evict pods from the under utilized nodes even if they
|
||||
// don't define a request for a given threshold. this works most of the
|
||||
// time and there is a use case for it. When using the restrict mode
|
||||
// we evaluate if the pod has a request for any of the resources the
|
||||
// user has provided as threshold.
|
||||
filters := []podutil.FilterFunc{handle.Evictor().Filter}
|
||||
if slices.Contains(args.EvictionModes, EvictionModeOnlyThresholdingResources) {
|
||||
filters = append(
|
||||
filters,
|
||||
withResourceRequestForAny(resourceThresholds...),
|
||||
)
|
||||
}
|
||||
|
||||
podFilter, err := podutil.
|
||||
NewOptions().
|
||||
WithFilter(podutil.WrapFilterFuncs(filters...)).
|
||||
BuildFilterFunc()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
|
||||
}
|
||||
|
||||
// resourceNames is a list of all resource names this plugin cares
|
||||
// about. we care about the resources for which we have a threshold and
|
||||
// all the resources we consider basic (cpu, memory, pods).
|
||||
resourceNames := uniquifyResourceNames(
|
||||
append(
|
||||
resourceThresholds,
|
||||
v1.ResourceCPU,
|
||||
v1.ResourceMemory,
|
||||
v1.ResourcePods,
|
||||
),
|
||||
)
|
||||
|
||||
return &HighNodeUtilization{
|
||||
handle: handle,
|
||||
args: highNodeUtilizatioArgs,
|
||||
podFilter: podFilter,
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: args,
|
||||
resourceNames: resourceNames,
|
||||
highThresholds: highThresholds,
|
||||
criteria: thresholdsToKeysAndValues(args.Thresholds),
|
||||
podFilter: podFilter,
|
||||
usageClient: newRequestedUsageClient(
|
||||
resourceNames,
|
||||
handle.GetPodsAssignedToNodeFunc(),
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name retrieves the plugin name
|
||||
// Name retrieves the plugin name.
|
||||
func (h *HighNodeUtilization) Name() string {
|
||||
return HighNodeUtilizationPluginName
|
||||
}
|
||||
|
||||
// Balance extension point implementation for the plugin
|
||||
// Balance holds the main logic of the plugin. It evicts pods from under
|
||||
// utilized nodes. The goal here is to concentrate pods in fewer nodes so that
|
||||
// fewer nodes are used.
|
||||
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
thresholds := h.args.Thresholds
|
||||
targetThresholds := make(api.ResourceThresholds)
|
||||
logger := klog.FromContext(klog.NewContext(ctx, h.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
|
||||
|
||||
setDefaultForThresholds(thresholds, targetThresholds)
|
||||
resourceNames := getResourceNames(targetThresholds)
|
||||
if err := h.usageClient.sync(ctx, nodes); err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error getting node usage: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
sourceNodes, highNodes := classifyNodes(
|
||||
getNodeUsage(nodes, resourceNames, h.handle.GetPodsAssignedToNodeFunc()),
|
||||
getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, h.handle.GetPodsAssignedToNodeFunc(), false),
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
// take a picture of the current state of the nodes, everything else
|
||||
// here is based on this snapshot.
|
||||
nodesMap, nodesUsageMap, podListMap := getNodeUsageSnapshot(nodes, h.usageClient)
|
||||
capacities := referencedResourceListForNodesCapacity(nodes)
|
||||
|
||||
// node usages are not presented as percentages over the capacity.
|
||||
// we need to normalize them to be able to compare them with the
|
||||
// thresholds. thresholds are already provided by the user in
|
||||
// percentage.
|
||||
usage, thresholds := assessNodesUsagesAndStaticThresholds(
|
||||
nodesUsageMap, capacities, h.args.Thresholds, h.highThresholds,
|
||||
)
|
||||
|
||||
// classify nodes in two groups: underutilized and schedulable. we will
|
||||
// later try to move pods from the first group to the second.
|
||||
nodeGroups := classifier.Classify(
|
||||
usage, thresholds,
|
||||
// underutilized nodes.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
return isNodeBelowThreshold(usage, threshold)
|
||||
},
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Node is unschedulable", "node", klog.KObj(node))
|
||||
// schedulable nodes.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
|
||||
logger.V(2).Info(
|
||||
"Node is unschedulable",
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
)
|
||||
return false
|
||||
}
|
||||
return !isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
})
|
||||
return true
|
||||
},
|
||||
)
|
||||
|
||||
// log message in one line
|
||||
keysAndValues := []interface{}{
|
||||
"CPU", thresholds[v1.ResourceCPU],
|
||||
"Mem", thresholds[v1.ResourceMemory],
|
||||
"Pods", thresholds[v1.ResourcePods],
|
||||
}
|
||||
for name := range thresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
|
||||
// the nodeplugin package works by means of NodeInfo structures. these
|
||||
// structures hold a series of information about the nodes. now that
|
||||
// we have classified the nodes, we can build the NodeInfo structures
|
||||
// for each group. NodeInfo structs carry usage and available resources
|
||||
// for each node.
|
||||
nodeInfos := make([][]NodeInfo, 2)
|
||||
category := []string{"underutilized", "overutilized"}
|
||||
for i := range nodeGroups {
|
||||
for nodeName := range nodeGroups[i] {
|
||||
logger.Info(
|
||||
"Node has been classified",
|
||||
"category", category[i],
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
"usage", nodesUsageMap[nodeName],
|
||||
"usagePercentage", normalizer.Round(usage[nodeName]),
|
||||
)
|
||||
nodeInfos[i] = append(nodeInfos[i], NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: nodesMap[nodeName],
|
||||
usage: nodesUsageMap[nodeName],
|
||||
allPods: podListMap[nodeName],
|
||||
},
|
||||
available: capNodeCapacitiesToThreshold(
|
||||
nodesMap[nodeName],
|
||||
thresholds[nodeName][1],
|
||||
h.resourceNames,
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(1).InfoS("Criteria for a node below target utilization", keysAndValues...)
|
||||
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(sourceNodes))
|
||||
lowNodes, schedulableNodes := nodeInfos[0], nodeInfos[1]
|
||||
|
||||
if len(sourceNodes) == 0 {
|
||||
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
|
||||
return nil
|
||||
}
|
||||
if len(sourceNodes) <= h.args.NumberOfNodes {
|
||||
klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", h.args.NumberOfNodes)
|
||||
return nil
|
||||
}
|
||||
if len(sourceNodes) == len(nodes) {
|
||||
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
if len(highNodes) == 0 {
|
||||
klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
|
||||
logger.V(1).Info("Criteria for a node below target utilization", h.criteria...)
|
||||
logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
|
||||
if len(lowNodes) == 0 {
|
||||
logger.V(1).Info(
|
||||
"No node is underutilized, nothing to do here, you might tune your thresholds further",
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// stop if the total available usage has dropped to zero - no more pods can be scheduled
|
||||
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
|
||||
for name := range totalAvailableUsage {
|
||||
if totalAvailableUsage[name].CmpInt64(0) < 1 {
|
||||
if len(lowNodes) <= h.args.NumberOfNodes {
|
||||
logger.V(1).Info(
|
||||
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
|
||||
"underutilizedNodes", len(lowNodes),
|
||||
"numberOfNodes", h.args.NumberOfNodes,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(lowNodes) == len(nodes) {
|
||||
logger.V(1).Info("All nodes are underutilized, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(schedulableNodes) == 0 {
|
||||
logger.V(1).Info("No node is available to schedule the pods, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
// stops the eviction process if the total available capacity usage has
|
||||
// dropped to zero - no more pods can be scheduled. this will signal
|
||||
// to stop if any of the available resources has dropped to zero.
|
||||
continueEvictionCond := func(_ NodeInfo, avail api.ReferencedResourceList) bool {
|
||||
for name := range avail {
|
||||
if avail[name].CmpInt64(0) < 1 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Sort the nodes by the usage in ascending order
|
||||
sortNodesByUsage(sourceNodes, true)
|
||||
// sorts the nodes by the usage in ascending order.
|
||||
sortNodesByUsage(lowNodes, true)
|
||||
|
||||
evictPodsFromSourceNodes(
|
||||
ctx,
|
||||
h.args.EvictableNamespaces,
|
||||
sourceNodes,
|
||||
highNodes,
|
||||
lowNodes,
|
||||
schedulableNodes,
|
||||
h.handle.Evictor(),
|
||||
evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
|
||||
h.podFilter,
|
||||
resourceNames,
|
||||
continueEvictionCond)
|
||||
h.resourceNames,
|
||||
continueEvictionCond,
|
||||
h.usageClient,
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func setDefaultForThresholds(thresholds, targetThresholds api.ResourceThresholds) {
|
||||
// check if Pods/CPU/Mem are set, if not, set them to 100
|
||||
if _, ok := thresholds[v1.ResourcePods]; !ok {
|
||||
thresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceCPU]; !ok {
|
||||
thresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceMemory]; !ok {
|
||||
thresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
}
|
||||
|
||||
// Default targetThreshold resource values to 100
|
||||
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
|
||||
for name := range thresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
targetThresholds[name] = MaxResourcePercentage
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
@@ -48,6 +47,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
evictionModes []EvictionMode
|
||||
nodes []*v1.Node
|
||||
pods []*v1.Pod
|
||||
expectedPodsEvicted uint
|
||||
@@ -96,17 +96,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -114,8 +104,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
|
||||
@@ -167,8 +156,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
@@ -244,13 +232,11 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
},
|
||||
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
@@ -433,6 +419,51 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "with extended resource threshold and no extended resource pods",
|
||||
thresholds: api.ResourceThresholds{
|
||||
extendedResource: 40,
|
||||
},
|
||||
evictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 10)
|
||||
}),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 10)
|
||||
}),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 10)
|
||||
}),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
// pods on node1 have the extended resource
|
||||
// request set and they put the node in the
|
||||
// over utilization range.
|
||||
test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 3)
|
||||
}),
|
||||
test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 3)
|
||||
}),
|
||||
// pods in the other nodes must not be evicted
|
||||
// because they do not have the extended
|
||||
// resource defined in their requests.
|
||||
test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
@@ -474,10 +505,14 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
|
||||
Thresholds: testCase.thresholds,
|
||||
},
|
||||
handle)
|
||||
plugin, err := NewHighNodeUtilization(
|
||||
ctx,
|
||||
&HighNodeUtilizationArgs{
|
||||
Thresholds: testCase.thresholds,
|
||||
EvictionModes: testCase.evictionModes,
|
||||
},
|
||||
handle,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
@@ -586,7 +621,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
|
||||
plugin, err := NewHighNodeUtilization(ctx, &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
},
|
||||
|
||||
@@ -21,157 +21,270 @@ import (
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const LowNodeUtilizationPluginName = "LowNodeUtilization"
|
||||
|
||||
// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
|
||||
// to calculate nodes' utilization and not the actual resource usage.
|
||||
|
||||
type LowNodeUtilization struct {
|
||||
handle frameworktypes.Handle
|
||||
args *LowNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
}
|
||||
|
||||
// this line makes sure that LowNodeUtilization implements the BalancePlugin
// interface.
|
||||
var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
|
||||
|
||||
// NewLowNodeUtilization builds plugin from its arguments while passing a handle
|
||||
func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
lowNodeUtilizationArgsArgs, ok := args.(*LowNodeUtilizationArgs)
|
||||
// LowNodeUtilization evicts pods from overutilized nodes to underutilized
|
||||
// nodes. Note that CPU/Memory requests are used to calculate nodes'
|
||||
// utilization and not the actual resource usage.
|
||||
type LowNodeUtilization struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *LowNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
underCriteria []any
|
||||
overCriteria []any
|
||||
resourceNames []v1.ResourceName
|
||||
extendedResourceNames []v1.ResourceName
|
||||
usageClient usageClient
|
||||
}
|
||||
|
||||
// NewLowNodeUtilization builds the plugin from its arguments while passing a
// handle. this plugin aims to move workload from overutilized nodes to
// underutilized nodes.
|
||||
func NewLowNodeUtilization(
|
||||
ctx context.Context, genericArgs runtime.Object, handle frameworktypes.Handle,
|
||||
) (frameworktypes.Plugin, error) {
|
||||
args, ok := genericArgs.(*LowNodeUtilizationArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
|
||||
return nil, fmt.Errorf(
|
||||
"want args to be of type LowNodeUtilizationArgs, got %T",
|
||||
genericArgs,
|
||||
)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", LowNodeUtilizationPluginName)
|
||||
|
||||
// resourceNames holds a list of resources for which the user has
// provided thresholds. extendedResourceNames holds those as well
// as cpu, memory and pods if no prometheus collection is used.
|
||||
resourceNames := getResourceNames(args.Thresholds)
|
||||
extendedResourceNames := resourceNames
|
||||
|
||||
// if we are using prometheus we need to validate we have everything we
|
||||
// need. if we aren't then we need to make sure we are also collecting
|
||||
// data for cpu, memory and pods.
|
||||
metrics := args.MetricsUtilization
|
||||
if metrics != nil && metrics.Source == api.PrometheusMetrics {
|
||||
if err := validatePrometheusMetricsUtilization(args); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
extendedResourceNames = uniquifyResourceNames(
|
||||
append(
|
||||
resourceNames,
|
||||
v1.ResourceCPU,
|
||||
v1.ResourceMemory,
|
||||
v1.ResourcePods,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
podFilter, err := podutil.NewOptions().
|
||||
podFilter, err := podutil.
|
||||
NewOptions().
|
||||
WithFilter(handle.Evictor().Filter).
|
||||
BuildFilterFunc()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
|
||||
}
|
||||
|
||||
// this plugin supports different ways of collecting usage data. each
// different way provides its own "usageClient". here we make sure we
// have the correct one or an error is triggered. XXX MetricsServer is
// deprecated, remove once dropped.
|
||||
var usageClient usageClient = newRequestedUsageClient(
|
||||
extendedResourceNames, handle.GetPodsAssignedToNodeFunc(),
|
||||
)
|
||||
if metrics != nil {
|
||||
usageClient, err = usageClientForMetrics(args, handle, extendedResourceNames)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &LowNodeUtilization{
|
||||
handle: handle,
|
||||
args: lowNodeUtilizationArgsArgs,
|
||||
podFilter: podFilter,
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: args,
|
||||
underCriteria: thresholdsToKeysAndValues(args.Thresholds),
|
||||
overCriteria: thresholdsToKeysAndValues(args.TargetThresholds),
|
||||
resourceNames: resourceNames,
|
||||
extendedResourceNames: extendedResourceNames,
|
||||
podFilter: podFilter,
|
||||
usageClient: usageClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
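// An illustrative sketch (not part of this change) of the new constructor
// signature in use, mirroring the updated NewHighNodeUtilization calls in the
// tests above; the context, handle and threshold values are assumed.
func newLowNodeUtilizationSketch(ctx context.Context, handle frameworktypes.Handle) (frameworktypes.BalancePlugin, error) {
	plugin, err := NewLowNodeUtilization(
		ctx,
		&LowNodeUtilizationArgs{
			Thresholds:       api.ResourceThresholds{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
			TargetThresholds: api.ResourceThresholds{v1.ResourceCPU: 70, v1.ResourceMemory: 70},
		},
		handle,
	)
	if err != nil {
		return nil, err
	}
	// every plugin built here satisfies the BalancePlugin interface asserted above.
	return plugin.(frameworktypes.BalancePlugin), nil
}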
// Name retrieves the plugin name
|
||||
// Name retrieves the plugin name.
|
||||
func (l *LowNodeUtilization) Name() string {
|
||||
return LowNodeUtilizationPluginName
|
||||
}
|
||||
|
||||
// Balance extension point implementation for the plugin
|
||||
// Balance holds the main logic of the plugin. It evicts pods from over
|
||||
// utilized nodes to under utilized nodes. The goal here is to evenly
|
||||
// distribute pods across nodes.
|
||||
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
useDeviationThresholds := l.args.UseDeviationThresholds
|
||||
thresholds := l.args.Thresholds
|
||||
targetThresholds := l.args.TargetThresholds
|
||||
logger := klog.FromContext(klog.NewContext(ctx, l.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
|
||||
|
||||
// check if Pods/CPU/Mem are set, if not, set them to 100
|
||||
if _, ok := thresholds[v1.ResourcePods]; !ok {
|
||||
if useDeviationThresholds {
|
||||
thresholds[v1.ResourcePods] = MinResourcePercentage
|
||||
targetThresholds[v1.ResourcePods] = MinResourcePercentage
|
||||
} else {
|
||||
thresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
if err := l.usageClient.sync(ctx, nodes); err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error getting node usage: %v", err),
|
||||
}
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceCPU]; !ok {
|
||||
if useDeviationThresholds {
|
||||
thresholds[v1.ResourceCPU] = MinResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MinResourcePercentage
|
||||
} else {
|
||||
thresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
}
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceMemory]; !ok {
|
||||
if useDeviationThresholds {
|
||||
thresholds[v1.ResourceMemory] = MinResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MinResourcePercentage
|
||||
} else {
|
||||
thresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
}
|
||||
}
|
||||
resourceNames := getResourceNames(thresholds)
|
||||
|
||||
lowNodes, sourceNodes := classifyNodes(
|
||||
getNodeUsage(nodes, resourceNames, l.handle.GetPodsAssignedToNodeFunc()),
|
||||
getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, l.handle.GetPodsAssignedToNodeFunc(), useDeviationThresholds),
|
||||
// The node has to be schedulable (to be able to move workload there)
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
|
||||
// starts by taking a snapshot of the nodes usage. we will use this
// snapshot to assess the nodes usage and classify them as
// underutilized or overutilized.
|
||||
nodesMap, nodesUsageMap, podListMap := getNodeUsageSnapshot(nodes, l.usageClient)
|
||||
capacities := referencedResourceListForNodesCapacity(nodes)
|
||||
|
||||
// usage, by default, is exposed in absolute values. we need to normalize
|
||||
// them (convert them to percentages) to be able to compare them with the
|
||||
// user provided thresholds. thresholds are already provided in percentage
|
||||
// in the <0; 100> interval.
|
||||
var usage map[string]api.ResourceThresholds
|
||||
var thresholds map[string][]api.ResourceThresholds
|
||||
if l.args.UseDeviationThresholds {
|
||||
// here the thresholds provided by the user represent
|
||||
// deviations from the average so we need to treat them
|
||||
// differently. when calculating the average we only
|
||||
// need to consider the resources for which the user
|
||||
// has provided thresholds.
|
||||
usage, thresholds = assessNodesUsagesAndRelativeThresholds(
|
||||
filterResourceNames(nodesUsageMap, l.resourceNames),
|
||||
capacities,
|
||||
l.args.Thresholds,
|
||||
l.args.TargetThresholds,
|
||||
)
|
||||
} else {
|
||||
usage, thresholds = assessNodesUsagesAndStaticThresholds(
|
||||
nodesUsageMap,
|
||||
capacities,
|
||||
l.args.Thresholds,
|
||||
l.args.TargetThresholds,
|
||||
)
|
||||
}
|
||||
|
||||
// classify nodes in under and over utilized. we will later try to move
|
||||
// pods from the overutilized nodes to the underutilized ones.
|
||||
nodeGroups := classifier.Classify(
|
||||
usage, thresholds,
|
||||
// underutilization criteria processing. nodes that are
|
||||
// underutilized but aren't schedulable are ignored.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
|
||||
logger.V(2).Info(
|
||||
"Node is unschedulable, thus not considered as underutilized",
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
)
|
||||
return false
|
||||
}
|
||||
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
return isNodeBelowThreshold(usage, threshold)
|
||||
},
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
return isNodeAboveTargetUtilization(usage, threshold.highResourceThreshold)
|
||||
// overutilization criteria evaluation.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
return isNodeAboveThreshold(usage, threshold)
|
||||
},
|
||||
)
|
||||
|
||||
// log message for nodes with low utilization
|
||||
underutilizationCriteria := []interface{}{
|
||||
"CPU", thresholds[v1.ResourceCPU],
|
||||
"Mem", thresholds[v1.ResourceMemory],
|
||||
"Pods", thresholds[v1.ResourcePods],
|
||||
}
|
||||
for name := range thresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(thresholds[name]))
|
||||
}
|
||||
}
|
||||
klog.V(1).InfoS("Criteria for a node under utilization", underutilizationCriteria...)
|
||||
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
// the nodeutilization package was designed to work with NodeInfo
// structs. these structs hold information about how utilized a node
// is. we need to go through the result of the classification and turn
// it into NodeInfo structs.
|
||||
nodeInfos := make([][]NodeInfo, 2)
|
||||
categories := []string{"underutilized", "overutilized"}
|
||||
classifiedNodes := map[string]bool{}
|
||||
for i := range nodeGroups {
|
||||
for nodeName := range nodeGroups[i] {
|
||||
classifiedNodes[nodeName] = true
|
||||
|
||||
// log message for over utilized nodes
|
||||
overutilizationCriteria := []interface{}{
|
||||
"CPU", targetThresholds[v1.ResourceCPU],
|
||||
"Mem", targetThresholds[v1.ResourceMemory],
|
||||
"Pods", targetThresholds[v1.ResourcePods],
|
||||
}
|
||||
for name := range targetThresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
overutilizationCriteria = append(overutilizationCriteria, string(name), int64(targetThresholds[name]))
|
||||
logger.Info(
|
||||
"Node has been classified",
|
||||
"category", categories[i],
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
"usage", nodesUsageMap[nodeName],
|
||||
"usagePercentage", normalizer.Round(usage[nodeName]),
|
||||
)
|
||||
|
||||
nodeInfos[i] = append(nodeInfos[i], NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: nodesMap[nodeName],
|
||||
usage: nodesUsageMap[nodeName],
|
||||
allPods: podListMap[nodeName],
|
||||
},
|
||||
available: capNodeCapacitiesToThreshold(
|
||||
nodesMap[nodeName],
|
||||
thresholds[nodeName][1],
|
||||
l.extendedResourceNames,
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
klog.V(1).InfoS("Criteria for a node above target utilization", overutilizationCriteria...)
|
||||
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(sourceNodes))
|
||||
|
||||
// log nodes that are appropriately utilized.
|
||||
for nodeName := range nodesMap {
|
||||
if !classifiedNodes[nodeName] {
|
||||
logger.Info(
|
||||
"Node is appropriately utilized",
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
"usage", nodesUsageMap[nodeName],
|
||||
"usagePercentage", normalizer.Round(usage[nodeName]),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
lowNodes, highNodes := nodeInfos[0], nodeInfos[1]
|
||||
|
||||
// log messages for nodes with low and high utilization
|
||||
logger.V(1).Info("Criteria for a node under utilization", l.underCriteria...)
|
||||
logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
logger.V(1).Info("Criteria for a node above target utilization", l.overCriteria...)
|
||||
logger.V(1).Info("Number of overutilized nodes", "totalNumber", len(highNodes))
|
||||
|
||||
if len(lowNodes) == 0 {
|
||||
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
|
||||
logger.V(1).Info(
|
||||
"No node is underutilized, nothing to do here, you might tune your thresholds further",
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(lowNodes) <= l.args.NumberOfNodes {
|
||||
klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", l.args.NumberOfNodes)
|
||||
logger.V(1).Info(
|
||||
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
|
||||
"underutilizedNodes", len(lowNodes),
|
||||
"numberOfNodes", l.args.NumberOfNodes,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(lowNodes) == len(nodes) {
|
||||
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
|
||||
logger.V(1).Info("All nodes are underutilized, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(sourceNodes) == 0 {
|
||||
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
|
||||
if len(highNodes) == 0 {
|
||||
logger.V(1).Info("All nodes are under target utilization, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
|
||||
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
|
||||
if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
|
||||
// this is a stop condition for the eviction process. we stop as soon
|
||||
// as the node usage drops below the threshold.
|
||||
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
|
||||
if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.available) {
|
||||
return false
|
||||
}
|
||||
for name := range totalAvailableUsage {
|
||||
@@ -183,19 +296,90 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
|
||||
return true
|
||||
}
|
||||
|
||||
// Sort the nodes by the usage in descending order
|
||||
sortNodesByUsage(sourceNodes, false)
|
||||
// sort the nodes by the usage in descending order
|
||||
sortNodesByUsage(highNodes, false)
|
||||
|
||||
var nodeLimit *uint
|
||||
if l.args.EvictionLimits != nil {
|
||||
nodeLimit = l.args.EvictionLimits.Node
|
||||
}
|
||||
|
||||
evictPodsFromSourceNodes(
|
||||
ctx,
|
||||
l.args.EvictableNamespaces,
|
||||
sourceNodes,
|
||||
highNodes,
|
||||
lowNodes,
|
||||
l.handle.Evictor(),
|
||||
evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
|
||||
l.podFilter,
|
||||
resourceNames,
|
||||
continueEvictionCond)
|
||||
l.extendedResourceNames,
|
||||
continueEvictionCond,
|
||||
l.usageClient,
|
||||
nodeLimit,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
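// deviationLimitsSketch is illustrative only (not part of this change): it
// shows how the relative, deviation-based limits used above are presumably
// assembled, mirroring the normalizer.Average/Negate/Sum usage in the tests
// added by this change. low/high are the user supplied deviations in
// percentage points.
func deviationLimitsSketch(
	usage map[string]api.ResourceThresholds,
	low, high api.ResourceThresholds,
) (api.ResourceThresholds, api.ResourceThresholds) {
	average := normalizer.Average(usage)
	// the under limit sits "low" points below the average and the over
	// limit sits "high" points above it.
	return normalizer.Sum(average, normalizer.Negate(low)),
		normalizer.Sum(average, high)
}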
// validatePrometheusMetricsUtilization validates the Prometheus metrics
|
||||
// utilization. XXX this should be done way earlier than this.
|
||||
func validatePrometheusMetricsUtilization(args *LowNodeUtilizationArgs) error {
|
||||
if args.MetricsUtilization.Prometheus == nil {
|
||||
return fmt.Errorf("prometheus property is missing")
|
||||
}
|
||||
|
||||
if args.MetricsUtilization.Prometheus.Query == "" {
|
||||
return fmt.Errorf("prometheus query is missing")
|
||||
}
|
||||
|
||||
uResourceNames := getResourceNames(args.Thresholds)
|
||||
oResourceNames := getResourceNames(args.TargetThresholds)
|
||||
if len(uResourceNames) != 1 || uResourceNames[0] != MetricResource {
|
||||
return fmt.Errorf(
|
||||
"thresholds are expected to specify a single instance of %q resource, got %v instead",
|
||||
MetricResource, uResourceNames,
|
||||
)
|
||||
}
|
||||
|
||||
if len(oResourceNames) != 1 || oResourceNames[0] != MetricResource {
|
||||
return fmt.Errorf(
|
||||
"targetThresholds are expected to specify a single instance of %q resource, got %v instead",
|
||||
MetricResource, oResourceNames,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
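// Purely illustrative values (assumptions, not part of this change): with the
// Prometheus source the thresholds must contain exactly one entry keyed by
// the MetricResource pseudo resource, as enforced by the validation above.
var (
	validPrometheusThresholdsSketch   = api.ResourceThresholds{MetricResource: 30}
	invalidPrometheusThresholdsSketch = api.ResourceThresholds{v1.ResourceCPU: 30} // rejected: not MetricResource
)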
// usageClientForMetrics returns the correct usage client based on the
|
||||
// metrics source. XXX MetricsServer is deprecated, remove once dropped.
|
||||
func usageClientForMetrics(
|
||||
args *LowNodeUtilizationArgs, handle frameworktypes.Handle, resources []v1.ResourceName,
|
||||
) (usageClient, error) {
|
||||
metrics := args.MetricsUtilization
|
||||
switch {
|
||||
case metrics.MetricsServer, metrics.Source == api.KubernetesMetrics:
|
||||
if handle.MetricsCollector() == nil {
|
||||
return nil, fmt.Errorf("metrics client not initialized")
|
||||
}
|
||||
return newActualUsageClient(
|
||||
resources,
|
||||
handle.GetPodsAssignedToNodeFunc(),
|
||||
handle.MetricsCollector(),
|
||||
), nil
|
||||
|
||||
case metrics.Source == api.PrometheusMetrics:
|
||||
if handle.PrometheusClient() == nil {
|
||||
return nil, fmt.Errorf("prometheus client not initialized")
|
||||
}
|
||||
return newPrometheusUsageClient(
|
||||
handle.GetPodsAssignedToNodeFunc(),
|
||||
handle.PrometheusClient(),
|
||||
metrics.Prometheus.Query,
|
||||
), nil
|
||||
case metrics.Source != "":
|
||||
return nil, fmt.Errorf("unrecognized metrics source")
|
||||
default:
|
||||
return nil, fmt.Errorf("metrics source is empty")
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -18,149 +18,562 @@ package nodeutilization
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
|
||||
)
|
||||
|
||||
func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
|
||||
nodeInfo := &NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: name},
|
||||
},
|
||||
},
|
||||
}
|
||||
apply(nodeInfo)
|
||||
return nodeInfo
|
||||
}
|
||||
|
||||
var (
|
||||
lowPriority = int32(0)
|
||||
highPriority = int32(10000)
|
||||
extendedResource = v1.ResourceName("example.com/foo")
|
||||
testNode1 = NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node1"},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
testNode2 = NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node2"},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
testNode3 = NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node3"},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func TestResourceUsagePercentages(t *testing.T) {
|
||||
resourceUsagePercentage := resourceUsagePercentages(NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
func TestSortNodesByUsage(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeInfoList []NodeInfo
|
||||
expectedNodeInfoNames []string
|
||||
}{
|
||||
{
|
||||
name: "cpu memory pods",
|
||||
nodeInfoList: []NodeInfo{
|
||||
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
},
|
||||
expectedNodeInfoNames: []string{"node3", "node1", "node2"},
|
||||
},
|
||||
{
|
||||
name: "memory",
|
||||
nodeInfoList: []NodeInfo{
|
||||
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
},
|
||||
expectedNodeInfoNames: []string{"node3", "node1", "node2"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name+" descending", func(t *testing.T) {
|
||||
sortNodesByUsage(tc.nodeInfoList, false) // ascending=false, sort nodes in descending order
|
||||
|
||||
for i := 0; i < len(tc.nodeInfoList); i++ {
|
||||
if tc.nodeInfoList[i].NodeUsage.node.Name != tc.expectedNodeInfoNames[i] {
|
||||
t.Errorf("Expected %v, got %v", tc.expectedNodeInfoNames[i], tc.nodeInfoList[i].NodeUsage.node.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run(tc.name+" ascending", func(t *testing.T) {
|
||||
sortNodesByUsage(tc.nodeInfoList, true) // ascending=true, sort nodes in ascending order
|
||||
|
||||
size := len(tc.nodeInfoList)
|
||||
for i := 0; i < size; i++ {
|
||||
if tc.nodeInfoList[i].NodeUsage.node.Name != tc.expectedNodeInfoNames[size-i-1] {
|
||||
t.Errorf("Expected %v, got %v", tc.expectedNodeInfoNames[size-i-1], tc.nodeInfoList[i].NodeUsage.node.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceUsageToResourceThreshold(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usage api.ReferencedResourceList
|
||||
capacity api.ReferencedResourceList
|
||||
expected api.ResourceThresholds
|
||||
}{
|
||||
{
|
||||
name: "10 percent",
|
||||
usage: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
},
|
||||
expected: api.ResourceThresholds{v1.ResourceCPU: 10},
|
||||
},
|
||||
{
|
||||
name: "zeroed out capacity",
|
||||
usage: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
|
||||
},
|
||||
expected: api.ResourceThresholds{v1.ResourceCPU: 0},
|
||||
},
|
||||
{
|
||||
name: "non existing usage",
|
||||
usage: api.ReferencedResourceList{
|
||||
"does-not-exist": resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
expected: api.ResourceThresholds{},
|
||||
},
|
||||
{
|
||||
name: "existing and non existing usage",
|
||||
usage: api.ReferencedResourceList{
|
||||
"does-not-exist": resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(200, resource.DecimalSI),
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
},
|
||||
expected: api.ResourceThresholds{v1.ResourceCPU: 20},
|
||||
},
|
||||
{
|
||||
name: "nil usage",
|
||||
usage: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: nil,
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
},
|
||||
expected: api.ResourceThresholds{},
|
||||
},
|
||||
{
|
||||
name: "nil capacity",
|
||||
usage: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: nil,
|
||||
},
|
||||
expected: api.ResourceThresholds{},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := ResourceUsageToResourceThreshold(tt.usage, tt.capacity)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Errorf("Expected %v, got %v", tt.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func ResourceListUsageNormalizer(usages, totals v1.ResourceList) api.ResourceThresholds {
|
||||
result := api.ResourceThresholds{}
|
||||
for rname, value := range usages {
|
||||
total, ok := totals[rname]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
used, avail := value.Value(), total.Value()
|
||||
if rname == v1.ResourceCPU {
|
||||
used, avail = value.MilliValue(), total.MilliValue()
|
||||
}
|
||||
|
||||
pct := math.Max(math.Min(float64(used)/float64(avail)*100, 100), 0)
|
||||
result[rname] = api.Percentage(pct)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
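// A small worked example of the helper above (values assumed): 480m of CPU
// out of a 1000m total normalizes to 48% and 512Mi of memory out of 1Gi
// normalizes to 50%.
func resourceListUsageNormalizerSketch() api.ResourceThresholds {
	usage := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("480m"),
		v1.ResourceMemory: resource.MustParse("512Mi"),
	}
	total := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("1"),
		v1.ResourceMemory: resource.MustParse("1Gi"),
	}
	// returns api.ResourceThresholds{cpu: 48, memory: 50}
	return ResourceListUsageNormalizer(usage, total)
}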
// This is a test for thresholds being defined as deviations from the average
|
||||
// usage. This is expected to be a little longer test case. We are going to
|
||||
// comment the steps to make it easier to follow.
|
||||
func TestClassificationUsingDeviationThresholds(t *testing.T) {
|
||||
// These are the two thresholds defined by the user. These thresholds
|
||||
// mean that our low limit will be 5 pct points below the average and
|
||||
// the high limit will be 5 pct points above the average.
|
||||
userDefinedThresholds := map[string]api.ResourceThresholds{
|
||||
"low": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
|
||||
"high": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
|
||||
}
|
||||
|
||||
// Create a fake total amount of resources for all nodes. We define
|
||||
// the total amount to 1000 for both memory and cpu. This is so we
|
||||
// can easily calculate (manually) the percentage of usages here.
|
||||
nodesTotal := normalizer.Replicate(
|
||||
[]string{"node1", "node2", "node3", "node4", "node5"},
|
||||
v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("1000"),
|
||||
v1.ResourceMemory: resource.MustParse("1000"),
|
||||
},
|
||||
)
|
||||
|
||||
// Create a fake usage per node per resource. We are aiming to
// have the average of these resources at 50%. When applying the
// thresholds we should obtain the low threshold at 45% and the high
// threshold at 55%.
|
||||
nodesUsage := map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("480"),
|
||||
v1.ResourceMemory: resource.MustParse("480"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("520"),
|
||||
v1.ResourceMemory: resource.MustParse("520"),
|
||||
},
|
||||
"node4": {
|
||||
v1.ResourceCPU: resource.MustParse("500"),
|
||||
v1.ResourceMemory: resource.MustParse("500"),
|
||||
},
|
||||
"node5": {
|
||||
v1.ResourceCPU: resource.MustParse("900"),
|
||||
v1.ResourceMemory: resource.MustParse("900"),
|
||||
},
|
||||
}
|
||||
|
||||
// Normalize the usage to percentages and then calculate the average
|
||||
// among all nodes.
|
||||
usage := normalizer.Normalize(nodesUsage, nodesTotal, ResourceListUsageNormalizer)
|
||||
average := normalizer.Average(usage)
|
||||
|
||||
// Create the thresholds by first applying the deviations and then
|
||||
// replicating once for each node. Thresholds are supposed to be per
|
||||
// node even though the user provides them only once. This is by
|
||||
// design as it opens the possibility for further implementations of
|
||||
// thresholds per node.
|
||||
thresholds := normalizer.Replicate(
|
||||
[]string{"node1", "node2", "node3", "node4", "node5"},
|
||||
[]api.ResourceThresholds{
|
||||
normalizer.Sum(average, normalizer.Negate(userDefinedThresholds["low"])),
|
||||
normalizer.Sum(average, userDefinedThresholds["high"]),
|
||||
},
|
||||
)
|
||||
|
||||
// Classify the nodes according to the thresholds. Nodes below the low
|
||||
// threshold (45%) are underutilized, nodes above the high threshold
|
||||
// (55%) are overutilized and nodes in between are properly utilized.
|
||||
result := classifier.Classify(
|
||||
usage, thresholds,
|
||||
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
|
||||
func(usage, limit api.Percentage) int {
|
||||
return int(usage - limit)
|
||||
},
|
||||
),
|
||||
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
|
||||
func(usage, limit api.Percentage) int {
|
||||
return int(limit - usage)
|
||||
},
|
||||
),
|
||||
)
|
||||
|
||||
// we expect node1 to be underutilized (10%), node2, node3 and node4
// to be properly utilized (48%, 52% and 50% respectively) and node5 to
// be overutilized (90%).
|
||||
expected := []map[string]api.ResourceThresholds{
|
||||
{"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10}},
|
||||
{"node5": {v1.ResourceCPU: 90, v1.ResourceMemory: 90}},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
t.Fatalf("unexpected result: %v, expecting: %v", result, expected)
|
||||
}
|
||||
}
|
||||
|
||||
// This is almost a copy of TestClassificationUsingDeviationThresholds but we
// are using pointers here. This is for making sure our generic types are kept
// in check. To understand this code better read the comments on
// TestClassificationUsingDeviationThresholds.
|
||||
func TestUsingDeviationThresholdsWithPointers(t *testing.T) {
|
||||
userDefinedThresholds := map[string]api.ResourceThresholds{
|
||||
"low": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
|
||||
"high": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
|
||||
}
|
||||
|
||||
nodesTotal := normalizer.Replicate(
|
||||
[]string{"node1", "node2", "node3", "node4", "node5"},
|
||||
map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("1000")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("1000")),
|
||||
},
|
||||
)
|
||||
|
||||
nodesUsage := map[string]map[v1.ResourceName]*resource.Quantity{
|
||||
"node1": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("100")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("100")),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("480")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("480")),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("520")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("520")),
|
||||
},
|
||||
"node4": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("500")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("500")),
|
||||
},
|
||||
"node5": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("900")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("900")),
|
||||
},
|
||||
}
|
||||
|
||||
ptrNormalizer := func(
|
||||
usages, totals map[v1.ResourceName]*resource.Quantity,
|
||||
) api.ResourceThresholds {
|
||||
newUsages := v1.ResourceList{}
|
||||
for name, usage := range usages {
|
||||
newUsages[name] = *usage
|
||||
}
|
||||
newTotals := v1.ResourceList{}
|
||||
for name, total := range totals {
|
||||
newTotals[name] = *total
|
||||
}
|
||||
return ResourceListUsageNormalizer(newUsages, newTotals)
|
||||
}
|
||||
|
||||
usage := normalizer.Normalize(nodesUsage, nodesTotal, ptrNormalizer)
|
||||
average := normalizer.Average(usage)
|
||||
|
||||
thresholds := normalizer.Replicate(
|
||||
[]string{"node1", "node2", "node3", "node4", "node5"},
|
||||
[]api.ResourceThresholds{
|
||||
normalizer.Sum(average, normalizer.Negate(userDefinedThresholds["low"])),
|
||||
normalizer.Sum(average, userDefinedThresholds["high"]),
|
||||
},
|
||||
)
|
||||
|
||||
result := classifier.Classify(
|
||||
usage, thresholds,
|
||||
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
|
||||
func(usage, limit api.Percentage) int {
|
||||
return int(usage - limit)
|
||||
},
|
||||
),
|
||||
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
|
||||
func(usage, limit api.Percentage) int {
|
||||
return int(limit - usage)
|
||||
},
|
||||
),
|
||||
)
|
||||
|
||||
expected := []map[string]api.ResourceThresholds{
|
||||
{"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10}},
|
||||
{"node5": {v1.ResourceCPU: 90, v1.ResourceMemory: 90}},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
t.Fatalf("unexpected result: %v, expecting: %v", result, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeAndClassify(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usage map[string]v1.ResourceList
|
||||
totals map[string]v1.ResourceList
|
||||
thresholds map[string][]api.ResourceThresholds
|
||||
expected []map[string]api.ResourceThresholds
|
||||
classifiers []classifier.Classifier[string, api.ResourceThresholds]
|
||||
}{
|
||||
{
|
||||
name: "happy path test",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
// underutilized on cpu and memory.
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("10"),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
"node2": {
|
||||
// overutilized on cpu and memory.
|
||||
v1.ResourceCPU: resource.MustParse("90"),
|
||||
v1.ResourceMemory: resource.MustParse("90"),
|
||||
},
|
||||
"node3": {
|
||||
// properly utilized on cpu and memory.
|
||||
v1.ResourceCPU: resource.MustParse("50"),
|
||||
v1.ResourceMemory: resource.MustParse("50"),
|
||||
},
|
||||
"node4": {
|
||||
// underutilized on cpu and overutilized on memory.
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("90"),
|
||||
},
|
||||
},
|
||||
totals: normalizer.Replicate(
|
||||
[]string{"node1", "node2", "node3", "node4"},
|
||||
v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
},
|
||||
),
|
||||
thresholds: normalizer.Replicate(
|
||||
[]string{"node1", "node2", "node3", "node4"},
|
||||
[]api.ResourceThresholds{
|
||||
{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
|
||||
{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
|
||||
},
|
||||
),
|
||||
expected: []map[string]api.ResourceThresholds{
|
||||
{
|
||||
"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10},
|
||||
},
|
||||
{
|
||||
"node2": {v1.ResourceCPU: 90, v1.ResourceMemory: 90},
|
||||
},
|
||||
},
|
||||
classifiers: []classifier.Classifier[string, api.ResourceThresholds]{
|
||||
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
|
||||
func(usage, limit api.Percentage) int {
|
||||
return int(usage - limit)
|
||||
},
|
||||
),
|
||||
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
|
||||
func(usage, limit api.Percentage) int {
|
||||
return int(limit - usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
|
||||
{
|
||||
name: "three thresholds",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
// match for the first classifier.
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("10"),
|
||||
},
|
||||
"node2": {
|
||||
// match for the third classifier.
|
||||
v1.ResourceCPU: resource.MustParse("90"),
|
||||
v1.ResourceMemory: resource.MustParse("90"),
|
||||
},
|
||||
"node3": {
|
||||
// match for the second classifier.
|
||||
v1.ResourceCPU: resource.MustParse("40"),
|
||||
v1.ResourceMemory: resource.MustParse("40"),
|
||||
},
|
||||
"node4": {
|
||||
// matches no classifier.
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("90"),
|
||||
},
|
||||
"node5": {
|
||||
// match for the first classifier.
|
||||
v1.ResourceCPU: resource.MustParse("11"),
|
||||
v1.ResourceMemory: resource.MustParse("18"),
|
||||
},
|
||||
},
|
||||
totals: normalizer.Replicate(
|
||||
[]string{"node1", "node2", "node3", "node4", "node5"},
|
||||
v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
},
|
||||
),
|
||||
thresholds: normalizer.Replicate(
|
||||
[]string{"node1", "node2", "node3", "node4", "node5"},
|
||||
[]api.ResourceThresholds{
|
||||
{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
|
||||
{v1.ResourceCPU: 50, v1.ResourceMemory: 50},
|
||||
{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
|
||||
},
|
||||
),
|
||||
expected: []map[string]api.ResourceThresholds{
|
||||
{
|
||||
"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10},
|
||||
"node5": {v1.ResourceCPU: 11, v1.ResourceMemory: 18},
|
||||
},
|
||||
{
|
||||
"node3": {v1.ResourceCPU: 40, v1.ResourceMemory: 40},
|
||||
},
|
||||
{
|
||||
"node2": {v1.ResourceCPU: 90, v1.ResourceMemory: 90},
|
||||
},
|
||||
},
|
||||
classifiers: []classifier.Classifier[string, api.ResourceThresholds]{
|
||||
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
|
||||
func(usage, limit api.Percentage) int {
|
||||
return int(usage - limit)
|
||||
},
|
||||
),
|
||||
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
|
||||
func(usage, limit api.Percentage) int {
|
||||
return int(usage - limit)
|
||||
},
|
||||
),
|
||||
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
|
||||
func(usage, limit api.Percentage) int {
|
||||
return int(limit - usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
expectedUsageInIntPercentage := map[v1.ResourceName]float64{
|
||||
v1.ResourceCPU: 63,
|
||||
v1.ResourceMemory: 90,
|
||||
v1.ResourcePods: 37,
|
||||
}
|
||||
|
||||
for resourceName, percentage := range expectedUsageInIntPercentage {
|
||||
if math.Floor(resourceUsagePercentage[resourceName]) != percentage {
|
||||
t.Errorf("Incorrect percentange computation, expected %v, got math.Floor(%v) instead", percentage, resourceUsagePercentage[resourceName])
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
|
||||
}
|
||||
|
||||
func TestSortNodesByUsageDescendingOrder(t *testing.T) {
|
||||
nodeList := []NodeInfo{testNode1, testNode2, testNode3}
|
||||
expectedNodeList := []NodeInfo{testNode3, testNode1, testNode2} // testNode3 has the highest usage
|
||||
sortNodesByUsage(nodeList, false) // ascending=false, sort nodes in descending order
|
||||
|
||||
for i := 0; i < len(expectedNodeList); i++ {
|
||||
if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
|
||||
t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSortNodesByUsageAscendingOrder(t *testing.T) {
|
||||
nodeList := []NodeInfo{testNode1, testNode2, testNode3}
|
||||
expectedNodeList := []NodeInfo{testNode2, testNode1, testNode3}
|
||||
sortNodesByUsage(nodeList, true) // ascending=true, sort nodes in ascending order
|
||||
|
||||
for i := 0; i < len(expectedNodeList); i++ {
|
||||
if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
|
||||
t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
|
||||
}
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pct := normalizer.Normalize(tt.usage, tt.totals, ResourceListUsageNormalizer)
|
||||
res := classifier.Classify(pct, tt.thresholds, tt.classifiers...)
|
||||
if !reflect.DeepEqual(res, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v, expecting: %v", res, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
142
pkg/framework/plugins/nodeutilization/normalizer/normalizer.go
Normal file
@@ -0,0 +1,142 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package normalizer
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
// Normalizer is a function that receives two values of the same type and
// returns an object of a different type. A use case can be a function
// that converts a memory usage from MB to % (the first argument would be
// the memory usage in MB and the second argument would be the total memory
// available in MB).
|
||||
type Normalizer[V, N any] func(V, V) N
|
||||
|
||||
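// An illustrative Normalizer (an assumption, not part of this file): it turns
// a used/total pair of float64 values into a percentage.
var percentageNormalizerSketch Normalizer[float64, float64] = func(used, total float64) float64 {
	if total == 0 {
		return 0
	}
	return used / total * 100
}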
// Values is a map of values indexed by a comparable key. An example of this
|
||||
// can be a list of resources indexed by a node name.
|
||||
type Values[K comparable, V any] map[K]V
|
||||
|
||||
// Number is an interface that represents a number, i.e. things we can
// do math operations on.
|
||||
type Number interface {
|
||||
constraints.Integer | constraints.Float
|
||||
}
|
||||
|
||||
// Normalize uses a Normalizer function to normalize a set of values. For
// example one may want to convert a set of memory usages from MB to %.
// This function receives a set of usages, a set of totals, and a Normalizer
// function. It returns a map with the normalized values.
|
||||
func Normalize[K comparable, V, N any](usages, totals Values[K, V], fn Normalizer[V, N]) map[K]N {
|
||||
result := Values[K, N]{}
|
||||
for key, value := range usages {
|
||||
total, ok := totals[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
result[key] = fn(value, total)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
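// A short usage sketch for Normalize (values assumed), mirroring the simple
// test cases added later in this change: usages and totals share keys and the
// Normalizer decides how each used/total pair is combined.
func normalizeUsageSketch() map[string]float64 {
	var ratio Normalizer[float64, float64] = func(used, total float64) float64 {
		return used / total
	}
	usages := map[string]float64{"cpu": 1, "mem": 6}
	totals := map[string]float64{"cpu": 2, "mem": 10}
	// result: map[cpu:0.5 mem:0.6]; keys missing from totals are skipped.
	return Normalize(usages, totals, ratio)
}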
// Replicate replicates the provided value for each key in the provided slice.
// Returns a map with the keys and the provided value.
|
||||
func Replicate[K comparable, V any](keys []K, value V) map[K]V {
|
||||
result := map[K]V{}
|
||||
for _, key := range keys {
|
||||
result[key] = value
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Clamp imposes minimum and maximum limits on a set of values. The function
|
||||
// will return a set of values where each value is between the minimum and
|
||||
// maximum values (included). Values below minimum are rounded up to the
|
||||
// minimum value, and values above maximum are rounded down to the maximum
|
||||
// value.
|
||||
func Clamp[K comparable, N Number, V ~map[K]N](values V, minimum, maximum N) V {
|
||||
result := V{}
|
||||
for key := range values {
|
||||
value := values[key]
|
||||
value = N(math.Max(float64(value), float64(minimum)))
|
||||
value = N(math.Min(float64(value), float64(maximum)))
|
||||
result[key] = value
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
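// A quick sketch of Clamp in use (values assumed): percentages outside the
// <0; 100> interval are pulled back into it.
func clampPercentagesSketch() map[string]float64 {
	raw := map[string]float64{"cpu": 120, "memory": -5, "pods": 40}
	// result: map[cpu:100 memory:0 pods:40]
	return Clamp(raw, 0, 100)
}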
// Map applies a function to each element of a slice of value maps. Returns a
// new slice with the results of applying the function to each element.
|
||||
func Map[K comparable, N Number, V ~map[K]N](items []V, fn func(V) V) []V {
|
||||
result := []V{}
|
||||
for _, item := range items {
|
||||
result = append(result, fn(item))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Negate converts the values of a map to their negated values.
|
||||
func Negate[K comparable, N Number, V ~map[K]N](values V) V {
|
||||
result := V{}
|
||||
for key, value := range values {
|
||||
result[key] = -value
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Round rounds the values of a map to the nearest integer. Calls math.Round on
|
||||
// each value of the map.
|
||||
func Round[K comparable, N Number, V ~map[K]N](values V) V {
|
||||
result := V{}
|
||||
for key, value := range values {
|
||||
result[key] = N(math.Round(float64(value)))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Sum sums up the values of two maps. Values are expected to be of Number
|
||||
// type. Original values are preserved. If a key is present in one map but
|
||||
// not in the other, the key is ignored.
|
||||
func Sum[K comparable, N Number, V ~map[K]N](mapA, mapB V) V {
|
||||
result := V{}
|
||||
for name, value := range mapA {
|
||||
result[name] = value + mapB[name]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Average calculates the average of a set of values. This function receives
|
||||
// a map of values and returns the average of all the values. Average expects
|
||||
// the values to represent the same unit of measure. You can use this function
|
||||
// after Normalizing the values.
|
||||
func Average[J, K comparable, N Number, V ~map[J]N](values map[K]V) V {
|
||||
counter := map[J]int{}
|
||||
result := V{}
|
||||
for _, imap := range values {
|
||||
for name, value := range imap {
|
||||
result[name] += value
|
||||
counter[name]++
|
||||
}
|
||||
}
|
||||
|
||||
for name := range result {
|
||||
result[name] /= N(counter[name])
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
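// An end-to-end sketch (inputs assumed) tying the helpers above together the
// way the nodeutilization plugins do: average the normalized usage, then
// derive per-node low/high limits by shifting that average down and up by the
// user supplied deviations.
func deviationThresholdsSketch(
	nodes []string,
	usagePct map[string]map[string]float64, // node name -> resource -> percent
	deviation map[string]float64, // resource -> percentage points
) map[string][]map[string]float64 {
	average := Average(usagePct)
	return Replicate(nodes, []map[string]float64{
		Sum(average, Negate(deviation)),
		Sum(average, deviation),
	})
}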
@@ -0,0 +1,649 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package normalizer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func ResourceListUsageNormalizer(usages, totals v1.ResourceList) api.ResourceThresholds {
|
||||
result := api.ResourceThresholds{}
|
||||
for rname, value := range usages {
|
||||
total, ok := totals[rname]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
used, avail := value.Value(), total.Value()
|
||||
if rname == v1.ResourceCPU {
|
||||
used, avail = value.MilliValue(), total.MilliValue()
|
||||
}
|
||||
|
||||
pct := math.Max(math.Min(float64(used)/float64(avail)*100, 100), 0)
|
||||
result[rname] = api.Percentage(pct)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func TestNormalizeSimple(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usages map[string]float64
|
||||
totals map[string]float64
|
||||
expected map[string]float64
|
||||
normalizer Normalizer[float64, float64]
|
||||
}{
|
||||
{
|
||||
name: "single normalization",
|
||||
usages: map[string]float64{"cpu": 1},
|
||||
totals: map[string]float64{"cpu": 2},
|
||||
expected: map[string]float64{"cpu": 0.5},
|
||||
normalizer: func(usage, total float64) float64 {
|
||||
return usage / total
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple normalizations",
|
||||
usages: map[string]float64{
|
||||
"cpu": 1,
|
||||
"mem": 6,
|
||||
},
|
||||
totals: map[string]float64{
|
||||
"cpu": 2,
|
||||
"mem": 10,
|
||||
},
|
||||
expected: map[string]float64{
|
||||
"cpu": 0.5,
|
||||
"mem": 0.6,
|
||||
},
|
||||
normalizer: func(usage, total float64) float64 {
|
||||
return usage / total
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "missing totals for a key",
|
||||
usages: map[string]float64{
|
||||
"cpu": 1,
|
||||
"mem": 6,
|
||||
},
|
||||
totals: map[string]float64{
|
||||
"cpu": 2,
|
||||
},
|
||||
expected: map[string]float64{
|
||||
"cpu": 0.5,
|
||||
},
|
||||
normalizer: func(usage, total float64) float64 {
|
||||
return usage / total
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := Normalize(tt.usages, tt.totals, tt.normalizer)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalize(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usages map[string]v1.ResourceList
|
||||
totals map[string]v1.ResourceList
|
||||
expected map[string]api.ResourceThresholds
|
||||
normalizer Normalizer[v1.ResourceList, api.ResourceThresholds]
|
||||
}{
|
||||
{
|
||||
name: "single normalization",
|
||||
usages: map[string]v1.ResourceList{
|
||||
"node1": {v1.ResourceCPU: resource.MustParse("1")},
|
||||
},
|
||||
totals: map[string]v1.ResourceList{
|
||||
"node1": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
},
|
||||
expected: map[string]api.ResourceThresholds{
|
||||
"node1": {v1.ResourceCPU: 50},
|
||||
},
|
||||
normalizer: ResourceListUsageNormalizer,
|
||||
},
|
||||
{
|
||||
name: "multiple normalization",
|
||||
usages: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("1"),
|
||||
v1.ResourceMemory: resource.MustParse("6"),
|
||||
v1.ResourcePods: resource.MustParse("2"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("20"),
|
||||
v1.ResourcePods: resource.MustParse("30"),
|
||||
},
|
||||
},
|
||||
totals: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("6"),
|
||||
v1.ResourcePods: resource.MustParse("100"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
v1.ResourcePods: resource.MustParse("100"),
|
||||
},
|
||||
},
|
||||
expected: map[string]api.ResourceThresholds{
|
||||
"node1": {
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 100,
|
||||
v1.ResourcePods: 2,
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: 10,
|
||||
v1.ResourceMemory: 20,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
},
|
||||
normalizer: ResourceListUsageNormalizer,
|
||||
},
|
||||
{
|
||||
name: "multiple normalization with over 100% usage",
|
||||
usages: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("120"),
|
||||
v1.ResourceMemory: resource.MustParse("130"),
|
||||
v1.ResourcePods: resource.MustParse("140"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("150"),
|
||||
v1.ResourceMemory: resource.MustParse("160"),
|
||||
v1.ResourcePods: resource.MustParse("170"),
|
||||
},
|
||||
},
|
||||
totals: Replicate(
|
||||
[]string{"node1", "node2"},
|
||||
v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
v1.ResourcePods: resource.MustParse("100"),
|
||||
},
|
||||
),
|
||||
expected: Replicate(
|
||||
[]string{"node1", "node2"},
|
||||
api.ResourceThresholds{
|
||||
v1.ResourceCPU: 100,
|
||||
v1.ResourceMemory: 100,
|
||||
v1.ResourcePods: 100,
|
||||
},
|
||||
),
|
||||
normalizer: ResourceListUsageNormalizer,
|
||||
},
|
||||
{
|
||||
name: "multiple normalization with over 100% usage and different totals",
|
||||
usages: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("99"),
|
||||
v1.ResourceMemory: resource.MustParse("99Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
totals: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
},
|
||||
expected: map[string]api.ResourceThresholds{
|
||||
"node1": {
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: 99,
|
||||
v1.ResourceMemory: 99,
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: 100,
|
||||
v1.ResourceMemory: 100,
|
||||
},
|
||||
},
|
||||
normalizer: ResourceListUsageNormalizer,
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := Normalize(tt.usages, tt.totals, tt.normalizer)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAverage(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usage map[string]v1.ResourceList
|
||||
limits map[string]v1.ResourceList
|
||||
expected api.ResourceThresholds
|
||||
}{
|
||||
{
|
||||
name: "empty usage",
|
||||
usage: map[string]v1.ResourceList{},
|
||||
limits: map[string]v1.ResourceList{},
|
||||
expected: api.ResourceThresholds{},
|
||||
},
|
||||
{
|
||||
name: "fifty percent usage",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("1"),
|
||||
v1.ResourceMemory: resource.MustParse("6"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("6"),
|
||||
},
|
||||
},
|
||||
limits: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("12"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("12"),
|
||||
},
|
||||
},
|
||||
expected: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed percent usage",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("80"),
|
||||
v1.ResourcePods: resource.MustParse("20"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("20"),
|
||||
v1.ResourceMemory: resource.MustParse("60"),
|
||||
v1.ResourcePods: resource.MustParse("20"),
|
||||
},
|
||||
},
|
||||
limits: Replicate(
|
||||
[]string{"node1", "node2"},
|
||||
v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
v1.ResourcePods: resource.MustParse("10000"),
|
||||
},
|
||||
),
|
||||
expected: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 15,
|
||||
v1.ResourceMemory: 70,
|
||||
v1.ResourcePods: 0.2,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed limits",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("30"),
|
||||
v1.ResourcePods: resource.MustParse("200"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("72"),
|
||||
v1.ResourcePods: resource.MustParse("200"),
|
||||
},
|
||||
},
|
||||
limits: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
v1.ResourcePods: resource.MustParse("1000"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("1000"),
|
||||
v1.ResourceMemory: resource.MustParse("180"),
|
||||
v1.ResourcePods: resource.MustParse("10"),
|
||||
},
|
||||
},
|
||||
expected: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50.5,
|
||||
v1.ResourceMemory: 35,
|
||||
v1.ResourcePods: 60,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "some nodes missing some resources",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
"limit-exists-in-all": resource.MustParse("10"),
|
||||
"limit-exists-in-two": resource.MustParse("11"),
|
||||
"limit-does-not-exist": resource.MustParse("12"),
|
||||
"usage-exists-in-all": resource.MustParse("13"),
|
||||
"usage-exists-in-two": resource.MustParse("20"),
|
||||
},
|
||||
"node2": {
|
||||
"limit-exists-in-all": resource.MustParse("10"),
|
||||
"limit-exists-in-two": resource.MustParse("11"),
|
||||
"limit-does-not-exist": resource.MustParse("12"),
|
||||
"usage-exists-in-all": resource.MustParse("13"),
|
||||
"usage-exists-in-two": resource.MustParse("20"),
|
||||
},
|
||||
"node3": {
|
||||
"limit-exists-in-all": resource.MustParse("10"),
|
||||
"limit-exists-in-two": resource.MustParse("11"),
|
||||
"limit-does-not-exist": resource.MustParse("12"),
|
||||
"usage-exists-in-all": resource.MustParse("13"),
|
||||
},
|
||||
"node4": {
|
||||
"limit-exists-in-all": resource.MustParse("10"),
|
||||
"limit-exists-in-two": resource.MustParse("11"),
|
||||
"limit-does-not-exist": resource.MustParse("12"),
|
||||
"usage-exists-in-all": resource.MustParse("13"),
|
||||
},
|
||||
"node5": {
|
||||
"random-usage-without-limit": resource.MustParse("10"),
|
||||
},
|
||||
},
|
||||
limits: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
"limit-exists-in-all": resource.MustParse("100"),
|
||||
"limit-exists-in-two": resource.MustParse("100"),
|
||||
"usage-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-two": resource.MustParse("100"),
|
||||
"usage-does-not-exist": resource.MustParse("100"),
|
||||
},
|
||||
"node2": {
|
||||
"limit-exists-in-all": resource.MustParse("100"),
|
||||
"limit-exists-in-two": resource.MustParse("100"),
|
||||
"usage-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-two": resource.MustParse("100"),
|
||||
"usage-does-not-exist": resource.MustParse("100"),
|
||||
},
|
||||
"node3": {
|
||||
"limit-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-two": resource.MustParse("100"),
|
||||
"usage-does-not-exist": resource.MustParse("100"),
|
||||
},
|
||||
"node4": {
|
||||
"limit-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-two": resource.MustParse("100"),
|
||||
"usage-does-not-exist": resource.MustParse("100"),
|
||||
},
|
||||
"node5": {
|
||||
"random-limit-without-usage": resource.MustParse("100"),
|
||||
},
|
||||
},
|
||||
expected: api.ResourceThresholds{
|
||||
"limit-exists-in-all": 10,
|
||||
"limit-exists-in-two": 11,
|
||||
"usage-exists-in-all": 13,
|
||||
"usage-exists-in-two": 20,
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
average := Average(
|
||||
Normalize(
|
||||
tt.usage, tt.limits, ResourceListUsageNormalizer,
|
||||
),
|
||||
)
|
||||
if !reflect.DeepEqual(average, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v, expected: %v", average, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSum(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
data api.ResourceThresholds
|
||||
deviations []api.ResourceThresholds
|
||||
expected []api.ResourceThresholds
|
||||
}{
|
||||
{
|
||||
name: "single deviation",
|
||||
data: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
deviations: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 1,
|
||||
v1.ResourceMemory: 1,
|
||||
v1.ResourcePods: 1,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 2,
|
||||
v1.ResourceMemory: 2,
|
||||
v1.ResourcePods: 2,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 3,
|
||||
v1.ResourceMemory: 3,
|
||||
v1.ResourcePods: 3,
|
||||
},
|
||||
},
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 51,
|
||||
v1.ResourceMemory: 51,
|
||||
v1.ResourcePods: 51,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 52,
|
||||
v1.ResourceMemory: 52,
|
||||
v1.ResourcePods: 52,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 53,
|
||||
v1.ResourceMemory: 53,
|
||||
v1.ResourcePods: 53,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deviate with negative values",
|
||||
data: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
deviations: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: -2,
|
||||
v1.ResourceMemory: -2,
|
||||
v1.ResourcePods: -2,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: -1,
|
||||
v1.ResourceMemory: -1,
|
||||
v1.ResourcePods: -1,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 0,
|
||||
v1.ResourceMemory: 0,
|
||||
v1.ResourcePods: 0,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 1,
|
||||
v1.ResourceMemory: 1,
|
||||
v1.ResourcePods: 1,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 2,
|
||||
v1.ResourceMemory: 2,
|
||||
v1.ResourcePods: 2,
|
||||
},
|
||||
},
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 48,
|
||||
v1.ResourceMemory: 48,
|
||||
v1.ResourcePods: 48,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 49,
|
||||
v1.ResourceMemory: 49,
|
||||
v1.ResourcePods: 49,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 51,
|
||||
v1.ResourceMemory: 51,
|
||||
v1.ResourcePods: 51,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 52,
|
||||
v1.ResourceMemory: 52,
|
||||
v1.ResourcePods: 52,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := []api.ResourceThresholds{}
|
||||
for _, deviation := range tt.deviations {
|
||||
partial := Sum(tt.data, deviation)
|
||||
result = append(result, partial)
|
||||
}
|
||||
|
||||
if len(result) != len(tt.deviations) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
fmt.Printf("%T, %T\n", result, tt.expected)
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClamp(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
data []api.ResourceThresholds
|
||||
minimum api.Percentage
|
||||
maximum api.Percentage
|
||||
expected []api.ResourceThresholds
|
||||
}{
|
||||
{
|
||||
name: "all over the limit",
|
||||
data: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
},
|
||||
minimum: 10,
|
||||
maximum: 20,
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "some over some below the limits",
|
||||
data: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 7,
|
||||
v1.ResourceMemory: 8,
|
||||
v1.ResourcePods: 88,
|
||||
},
|
||||
},
|
||||
minimum: 10,
|
||||
maximum: 20,
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 10,
|
||||
v1.ResourceMemory: 10,
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "all within the limits",
|
||||
data: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 15,
|
||||
v1.ResourceMemory: 15,
|
||||
v1.ResourcePods: 15,
|
||||
},
|
||||
},
|
||||
minimum: 10,
|
||||
maximum: 20,
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 15,
|
||||
v1.ResourceMemory: 15,
|
||||
v1.ResourcePods: 15,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fn := func(thresholds api.ResourceThresholds) api.ResourceThresholds {
|
||||
return Clamp(thresholds, tt.minimum, tt.maximum)
|
||||
}
|
||||
result := Map(tt.data, fn)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -18,6 +18,18 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
// EvictionMode describes a mode of eviction. See the list below for the
// available modes.
|
||||
type EvictionMode string
|
||||
|
||||
const (
|
||||
// EvictionModeOnlyThresholdingResources makes the descheduler evict
// only pods that have a resource request defined for any of the
// user-provided thresholds. If the pod does not request the resource,
// it will not be evicted.
|
||||
EvictionModeOnlyThresholdingResources EvictionMode = "OnlyThresholdingResources"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
@@ -28,11 +40,15 @@ type LowNodeUtilizationArgs struct {
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
TargetThresholds api.ResourceThresholds `json:"targetThresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
MetricsUtilization *MetricsUtilization `json:"metricsUtilization,omitempty"`
|
||||
|
||||
// Naming this one differently since namespaces are still taken into
// account when computing the resources used by pods, but are then
// filtered out before eviction.
|
||||
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"`
|
||||
|
||||
// evictionLimits limits the number of evictions per domain. E.g. node, namespace, total.
|
||||
EvictionLimits *api.EvictionLimits `json:"evictionLimits,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
@@ -43,8 +59,39 @@ type HighNodeUtilizationArgs struct {
|
||||
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
|
||||
// EvictionModes is a set of modes to be taken into account when the
// descheduler evicts pods. For example, the mode
// `OnlyThresholdingResources` can be used to make sure the descheduler
// only evicts pods that have resource requests for the defined
// thresholds.
|
||||
EvictionModes []EvictionMode `json:"evictionModes,omitempty"`
|
||||
|
||||
// Naming this one differently since namespaces are still taken into
// account when computing the resources used by pods, but are then
// filtered out before eviction.
|
||||
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"`
|
||||
}
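A hedged sketch of how the new field might be set in practice; the threshold values are illustrative only:

args := &HighNodeUtilizationArgs{
    Thresholds: api.ResourceThresholds{
        v1.ResourceCPU:    20,
        v1.ResourceMemory: 20,
    },
    // Evict only pods that actually request cpu or memory.
    EvictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
}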
|
||||
|
||||
// MetricsUtilization allows consuming actual resource utilization from metrics
|
||||
// +k8s:deepcopy-gen=true
|
||||
type MetricsUtilization struct {
|
||||
// metricsServer enables metrics from a kubernetes metrics server.
|
||||
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
|
||||
// Deprecated. Use Source instead.
|
||||
MetricsServer bool `json:"metricsServer,omitempty"`
|
||||
|
||||
// source enables the plugin to consume metrics from a metrics source.
// Currently only KubernetesMetrics is available.
|
||||
Source api.MetricsSource `json:"source,omitempty"`
|
||||
|
||||
// prometheus enables metrics collection through a prometheus query.
|
||||
Prometheus *Prometheus `json:"prometheus,omitempty"`
|
||||
}
|
||||
|
||||
type Prometheus struct {
|
||||
// query returning a vector of samples, each sample labeled with `instance`
// corresponding to a node name, and each sample value a real number
// in the <0; 1> interval.
|
||||
Query string `json:"query,omitempty"`
|
||||
}
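Putting the new fields together, a hedged sketch of a LowNodeUtilization configuration that takes node utilization from Prometheus; the query name is the recording rule used in the tests further below, and the threshold values are purely illustrative:

args := &LowNodeUtilizationArgs{
    Thresholds:       api.ResourceThresholds{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
    TargetThresholds: api.ResourceThresholds{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
    MetricsUtilization: &MetricsUtilization{
        // The query must return one sample per node, labeled with `instance`
        // and valued in the <0; 1> interval.
        Source: api.PrometheusMetrics,
        Prometheus: &Prometheus{
            Query: "instance:node_cpu:rate:sum",
        },
    },
}

ValidateLowNodeUtilizationArgs (shown further below) rejects such a configuration when the query is empty or when a Prometheus block is combined with the KubernetesMetrics source.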
pkg/framework/plugins/nodeutilization/usageclients.go (new file, 322 lines)
@@ -0,0 +1,322 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog/v2"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
type UsageClientType int
|
||||
|
||||
const (
|
||||
requestedUsageClientType UsageClientType = iota
|
||||
actualUsageClientType
|
||||
prometheusUsageClientType
|
||||
)
|
||||
|
||||
type notSupportedError struct {
|
||||
usageClientType UsageClientType
|
||||
}
|
||||
|
||||
func (e notSupportedError) Error() string {
return fmt.Sprintf("usage client type %v is not supported", e.usageClientType)
}
|
||||
|
||||
func newNotSupportedError(usageClientType UsageClientType) *notSupportedError {
|
||||
return &notSupportedError{
|
||||
usageClientType: usageClientType,
|
||||
}
|
||||
}
|
||||
|
||||
type usageClient interface {
|
||||
// Both low/high node utilization plugins are expected to invoke sync right
// after the Balance method is invoked. There's no cache invalidation, so each
// Balance is expected to get the latest data by invoking sync.
|
||||
sync(ctx context.Context, nodes []*v1.Node) error
|
||||
nodeUtilization(node string) api.ReferencedResourceList
|
||||
pods(node string) []*v1.Pod
|
||||
podUsage(pod *v1.Pod) (api.ReferencedResourceList, error)
|
||||
}
|
||||
|
||||
type requestedUsageClient struct {
|
||||
resourceNames []v1.ResourceName
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]api.ReferencedResourceList
|
||||
}
|
||||
|
||||
var _ usageClient = &requestedUsageClient{}
|
||||
|
||||
func newRequestedUsageClient(
|
||||
resourceNames []v1.ResourceName,
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
) *requestedUsageClient {
|
||||
return &requestedUsageClient{
|
||||
resourceNames: resourceNames,
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
|
||||
return s._nodeUtilization[node]
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) pods(node string) []*v1.Pod {
|
||||
return s._pods[node]
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
|
||||
usage := make(api.ReferencedResourceList)
|
||||
for _, resourceName := range s.resourceNames {
|
||||
usage[resourceName] = utilptr.To[resource.Quantity](utils.GetResourceRequestQuantity(pod, resourceName).DeepCopy())
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
|
||||
s._nodeUtilization = make(map[string]api.ReferencedResourceList)
|
||||
s._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
for _, node := range nodes {
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, s.getPodsAssignedToNode, nil)
|
||||
if err != nil {
|
||||
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
|
||||
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
|
||||
}
|
||||
|
||||
nodeUsage, err := nodeutil.NodeUtilization(pods, s.resourceNames, func(pod *v1.Pod) (v1.ResourceList, error) {
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
return req, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// store the snapshot of pods from the same (or the closest) node utilization computation
|
||||
s._pods[node.Name] = pods
|
||||
s._nodeUtilization[node.Name] = nodeUsage
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type actualUsageClient struct {
|
||||
resourceNames []v1.ResourceName
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
metricsCollector *metricscollector.MetricsCollector
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]api.ReferencedResourceList
|
||||
}
|
||||
|
||||
var _ usageClient = &actualUsageClient{}
|
||||
|
||||
func newActualUsageClient(
|
||||
resourceNames []v1.ResourceName,
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
metricsCollector *metricscollector.MetricsCollector,
|
||||
) *actualUsageClient {
|
||||
return &actualUsageClient{
|
||||
resourceNames: resourceNames,
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
metricsCollector: metricsCollector,
|
||||
}
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
|
||||
return client._nodeUtilization[node]
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) pods(node string) []*v1.Pod {
|
||||
return client._pods[node]
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
|
||||
// It's not efficient to keep track of all pods in a cluster when only a fraction of them is evicted.
// Thus, take the current pod metrics without computing any smoothing (e.g. EWMA).
|
||||
podMetrics, err := client.metricsCollector.MetricsClient().MetricsV1beta1().PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get podmetrics for %q/%q: %v", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
totalUsage := make(api.ReferencedResourceList)
|
||||
for _, container := range podMetrics.Containers {
|
||||
for _, resourceName := range client.resourceNames {
|
||||
if resourceName == v1.ResourcePods {
|
||||
continue
|
||||
}
|
||||
if _, exists := container.Usage[resourceName]; !exists {
|
||||
return nil, fmt.Errorf("pod %v/%v: container %q is missing %q resource", pod.Namespace, pod.Name, container.Name, resourceName)
|
||||
}
|
||||
if totalUsage[resourceName] == nil {
|
||||
totalUsage[resourceName] = utilptr.To[resource.Quantity](container.Usage[resourceName].DeepCopy())
|
||||
} else {
|
||||
totalUsage[resourceName].Add(container.Usage[resourceName])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return totalUsage, nil
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
|
||||
client._nodeUtilization = make(map[string]api.ReferencedResourceList)
|
||||
client._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
nodesUsage, err := client.metricsCollector.AllNodesUsage()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, client.getPodsAssignedToNode, nil)
|
||||
if err != nil {
|
||||
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
|
||||
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
|
||||
}
|
||||
|
||||
collectedNodeUsage, ok := nodesUsage[node.Name]
|
||||
if !ok {
|
||||
return fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
|
||||
}
|
||||
collectedNodeUsage[v1.ResourcePods] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
|
||||
|
||||
nodeUsage := api.ReferencedResourceList{}
|
||||
for _, resourceName := range client.resourceNames {
|
||||
if _, exists := collectedNodeUsage[resourceName]; !exists {
|
||||
return fmt.Errorf("unable to find %q resource for collected %q node metric", resourceName, node.Name)
|
||||
}
|
||||
nodeUsage[resourceName] = collectedNodeUsage[resourceName]
|
||||
}
|
||||
// store the snapshot of pods from the same (or the closest) node utilization computation
|
||||
client._pods[node.Name] = pods
|
||||
client._nodeUtilization[node.Name] = nodeUsage
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type prometheusUsageClient struct {
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
promClient promapi.Client
|
||||
promQuery string
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
|
||||
}
|
||||
|
||||
var _ usageClient = &prometheusUsageClient{}
|
||||
|
||||
func newPrometheusUsageClient(
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
promClient promapi.Client,
|
||||
promQuery string,
|
||||
) *prometheusUsageClient {
|
||||
return &prometheusUsageClient{
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
promClient: promClient,
|
||||
promQuery: promQuery,
|
||||
}
|
||||
}
|
||||
|
||||
func (client *prometheusUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
|
||||
return client._nodeUtilization[node]
|
||||
}
|
||||
|
||||
func (client *prometheusUsageClient) pods(node string) []*v1.Pod {
|
||||
return client._pods[node]
|
||||
}
|
||||
|
||||
func (client *prometheusUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
return nil, newNotSupportedError(prometheusUsageClientType)
|
||||
}
|
||||
|
||||
func NodeUsageFromPrometheusMetrics(ctx context.Context, promClient promapi.Client, promQuery string) (map[string]map[v1.ResourceName]*resource.Quantity, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
results, warnings, err := promv1.NewAPI(promClient).Query(ctx, promQuery, time.Now())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to capture prometheus metrics: %v", err)
|
||||
}
|
||||
if len(warnings) > 0 {
|
||||
logger.Info("prometheus metrics warnings: %v", warnings)
|
||||
}
|
||||
|
||||
if results.Type() != model.ValVector {
|
||||
return nil, fmt.Errorf("expected query results to be of type %q, got %q instead", model.ValVector, results.Type())
|
||||
}
|
||||
|
||||
nodeUsages := make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
for _, sample := range results.(model.Vector) {
|
||||
nodeName, exists := sample.Metric["instance"]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("The collected metrics sample is missing 'instance' key")
|
||||
}
|
||||
if sample.Value < 0 || sample.Value > 1 {
|
||||
return nil, fmt.Errorf("The collected metrics sample for %q has value %v outside of <0; 1> interval", string(nodeName), sample.Value)
|
||||
}
|
||||
nodeUsages[string(nodeName)] = map[v1.ResourceName]*resource.Quantity{
|
||||
MetricResource: resource.NewQuantity(int64(sample.Value*100), resource.DecimalSI),
|
||||
}
|
||||
}
|
||||
|
||||
return nodeUsages, nil
|
||||
}
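For reference, a sketch of the sample shape NodeUsageFromPrometheusMetrics accepts, mirroring the sample() helper used in the tests below; the node name is illustrative:

s := &model.Sample{
    Metric: model.Metric{
        "__name__": "instance:node_cpu:rate:sum",
        // The `instance` label must carry the node name the usage is keyed by.
        "instance": "worker-1",
    },
    // A utilization ratio in <0; 1>; 0.42 is stored as a quantity of 42 under MetricResource.
    Value: 0.42,
}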
|
||||
|
||||
func (client *prometheusUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
|
||||
client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
client._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
nodeUsages, err := NodeUsageFromPrometheusMetrics(ctx, client.promClient, client.promQuery)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
if _, exists := nodeUsages[node.Name]; !exists {
|
||||
return fmt.Errorf("unable to find metric entry for %v", node.Name)
|
||||
}
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, client.getPodsAssignedToNode, nil)
|
||||
if err != nil {
|
||||
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
|
||||
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
|
||||
}
|
||||
|
||||
// store the snapshot of pods from the same (or the closest) node utilization computation
|
||||
client._pods[node.Name] = pods
|
||||
client._nodeUtilization[node.Name] = nodeUsages[node.Name]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
pkg/framework/plugins/nodeutilization/usageclients_test.go (new file, 299 lines)
@@ -0,0 +1,299 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/informers"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
var (
|
||||
nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
|
||||
podsgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
|
||||
)
|
||||
|
||||
func updateMetricsAndCheckNodeUtilization(
|
||||
t *testing.T,
|
||||
ctx context.Context,
|
||||
newValue, expectedValue int64,
|
||||
metricsClientset *fakemetricsclient.Clientset,
|
||||
collector *metricscollector.MetricsCollector,
|
||||
usageClient usageClient,
|
||||
nodes []*v1.Node,
|
||||
nodeName string,
|
||||
nodemetrics *v1beta1.NodeMetrics,
|
||||
) {
|
||||
t.Logf("Set current node cpu usage to %v", newValue)
|
||||
nodemetrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(newValue, resource.DecimalSI)
|
||||
metricsClientset.Tracker().Update(nodesgvr, nodemetrics, "")
|
||||
err := collector.Collect(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to capture metrics: %v", err)
|
||||
}
|
||||
err = usageClient.sync(ctx, nodes)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to sync a snapshot: %v", err)
|
||||
}
|
||||
nodeUtilization := usageClient.nodeUtilization(nodeName)
|
||||
t.Logf("current node cpu usage: %v\n", nodeUtilization[v1.ResourceCPU].MilliValue())
|
||||
if nodeUtilization[v1.ResourceCPU].MilliValue() != expectedValue {
|
||||
t.Fatalf("cpu node usage expected to be %v, got %v instead", expectedValue, nodeUtilization[v1.ResourceCPU].MilliValue())
|
||||
}
|
||||
pods := usageClient.pods(nodeName)
|
||||
fmt.Printf("pods: %#v\n", pods)
|
||||
if len(pods) != 2 {
|
||||
t.Fatalf("expected 2 pods for node %v, got %v instead", nodeName, len(pods))
|
||||
}
|
||||
}
|
||||
|
||||
func TestActualUsageClient(t *testing.T) {
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
|
||||
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
|
||||
p21 := test.BuildTestPod("p21", 400, 0, n2.Name, nil)
|
||||
p22 := test.BuildTestPod("p22", 400, 0, n2.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n3.Name, nil)
|
||||
|
||||
nodes := []*v1.Node{n1, n2, n3}
|
||||
|
||||
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
|
||||
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
|
||||
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3, p1, p21, p22, p3)
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
metricsClientset.Tracker().Create(nodesgvr, n1metrics, "")
|
||||
metricsClientset.Tracker().Create(nodesgvr, n2metrics, "")
|
||||
metricsClientset.Tracker().Create(nodesgvr, n3metrics, "")
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
resourceNames := []v1.ResourceName{
|
||||
v1.ResourceCPU,
|
||||
v1.ResourceMemory,
|
||||
}
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Fatalf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
collector := metricscollector.NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
|
||||
|
||||
usageClient := newActualUsageClient(
|
||||
resourceNames,
|
||||
podsAssignedToNode,
|
||||
collector,
|
||||
)
|
||||
|
||||
updateMetricsAndCheckNodeUtilization(t, ctx,
|
||||
1400, 1400,
|
||||
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
|
||||
)
|
||||
|
||||
updateMetricsAndCheckNodeUtilization(t, ctx,
|
||||
500, 1310,
|
||||
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
|
||||
)
|
||||
|
||||
updateMetricsAndCheckNodeUtilization(t, ctx,
|
||||
900, 1269,
|
||||
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
|
||||
)
|
||||
}
|
||||
|
||||
type fakePromClient struct {
|
||||
result interface{}
|
||||
dataType model.ValueType
|
||||
}
|
||||
|
||||
type fakePayload struct {
|
||||
Status string `json:"status"`
|
||||
Data queryResult `json:"data"`
|
||||
}
|
||||
|
||||
type queryResult struct {
|
||||
Type model.ValueType `json:"resultType"`
|
||||
Result interface{} `json:"result"`
|
||||
}
|
||||
|
||||
func (client *fakePromClient) URL(ep string, args map[string]string) *url.URL {
|
||||
return &url.URL{}
|
||||
}
|
||||
|
||||
func (client *fakePromClient) Do(ctx context.Context, request *http.Request) (*http.Response, []byte, error) {
|
||||
jsonData, err := json.Marshal(fakePayload{
|
||||
Status: "success",
|
||||
Data: queryResult{
|
||||
Type: client.dataType,
|
||||
Result: client.result,
|
||||
},
|
||||
})
|
||||
|
||||
return &http.Response{StatusCode: 200}, jsonData, err
|
||||
}
|
||||
|
||||
func sample(metricName, nodeName string, value float64) *model.Sample {
|
||||
return &model.Sample{
|
||||
Metric: model.Metric{
|
||||
"__name__": model.LabelValue(metricName),
|
||||
"instance": model.LabelValue(nodeName),
|
||||
},
|
||||
Value: model.SampleValue(value),
|
||||
Timestamp: 1728991761711,
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrometheusUsageClient(t *testing.T) {
|
||||
n1 := test.BuildTestNode("ip-10-0-17-165.ec2.internal", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("ip-10-0-51-101.ec2.internal", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("ip-10-0-94-25.ec2.internal", 2000, 3000, 10, nil)
|
||||
|
||||
nodes := []*v1.Node{n1, n2, n3}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
|
||||
p21 := test.BuildTestPod("p21", 400, 0, n2.Name, nil)
|
||||
p22 := test.BuildTestPod("p22", 400, 0, n2.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n3.Name, nil)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
result interface{}
|
||||
dataType model.ValueType
|
||||
nodeUsage map[string]int64
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "valid data",
|
||||
dataType: model.ValVector,
|
||||
result: model.Vector{
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 0.20381818181818104),
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-17-165.ec2.internal", 0.4245454545454522),
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-94-25.ec2.internal", 0.5695757575757561),
|
||||
},
|
||||
nodeUsage: map[string]int64{
|
||||
"ip-10-0-51-101.ec2.internal": 20,
|
||||
"ip-10-0-17-165.ec2.internal": 42,
|
||||
"ip-10-0-94-25.ec2.internal": 56,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid data missing instance label",
|
||||
dataType: model.ValVector,
|
||||
result: model.Vector{
|
||||
&model.Sample{
|
||||
Metric: model.Metric{
|
||||
"__name__": model.LabelValue("instance:node_cpu:rate:sum"),
|
||||
},
|
||||
Value: model.SampleValue(0.20381818181818104),
|
||||
Timestamp: 1728991761711,
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("The collected metrics sample is missing 'instance' key"),
|
||||
},
|
||||
{
|
||||
name: "invalid data value out of range",
|
||||
dataType: model.ValVector,
|
||||
result: model.Vector{
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 1.20381818181818104),
|
||||
},
|
||||
err: fmt.Errorf("The collected metrics sample for \"ip-10-0-51-101.ec2.internal\" has value 1.203818181818181 outside of <0; 1> interval"),
|
||||
},
|
||||
{
|
||||
name: "invalid data not a vector",
|
||||
dataType: model.ValScalar,
|
||||
result: model.Scalar{
|
||||
Value: model.SampleValue(0.20381818181818104),
|
||||
Timestamp: 1728991761711,
|
||||
},
|
||||
err: fmt.Errorf("expected query results to be of type \"vector\", got \"scalar\" instead"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
pClient := &fakePromClient{
|
||||
result: tc.result,
|
||||
dataType: tc.dataType,
|
||||
}
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3, p1, p21, p22, p3)
|
||||
|
||||
ctx := context.TODO()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Fatalf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
prometheusUsageClient := newPrometheusUsageClient(podsAssignedToNode, pClient, "instance:node_cpu:rate:sum")
|
||||
err = prometheusUsageClient.sync(ctx, nodes)
|
||||
if tc.err == nil {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err == nil {
|
||||
t.Fatalf("unexpected %q error, got nil instead", tc.err)
|
||||
} else if err.Error() != tc.err.Error() {
|
||||
t.Fatalf("expected %q error, got %q instead", tc.err, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeUtil := prometheusUsageClient.nodeUtilization(node.Name)
|
||||
if nodeUtil[MetricResource].Value() != tc.nodeUsage[node.Name] {
|
||||
t.Fatalf("expected %q node utilization to be %v, got %v instead", node.Name, tc.nodeUsage[node.Name], nodeUtil[MetricResource])
|
||||
} else {
|
||||
t.Logf("%v node utilization: %v", node.Name, nodeUtil[MetricResource])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -30,7 +30,25 @@ func ValidateHighNodeUtilizationArgs(obj runtime.Object) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// make sure we know about the eviction modes defined by the user.
|
||||
return validateEvictionModes(args.EvictionModes)
|
||||
}
|
||||
|
||||
// validateEvictionModes checks if the eviction modes are valid/known
|
||||
// to the descheduler.
|
||||
func validateEvictionModes(modes []EvictionMode) error {
|
||||
// we are using this approach to make the code more extensible
|
||||
// in the future.
|
||||
validModes := map[EvictionMode]bool{
|
||||
EvictionModeOnlyThresholdingResources: true,
|
||||
}
|
||||
|
||||
for _, mode := range modes {
|
||||
if validModes[mode] {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("invalid eviction mode %s", mode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -44,6 +62,17 @@ func ValidateLowNodeUtilizationArgs(obj runtime.Object) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if args.MetricsUtilization != nil {
|
||||
if args.MetricsUtilization.Source == api.KubernetesMetrics && args.MetricsUtilization.MetricsServer {
|
||||
return fmt.Errorf("it is not allowed to set both %q source and metricsServer", api.KubernetesMetrics)
|
||||
}
|
||||
if args.MetricsUtilization.Source == api.KubernetesMetrics && args.MetricsUtilization.Prometheus != nil {
|
||||
return fmt.Errorf("prometheus configuration is not allowed to set when source is set to %q", api.KubernetesMetrics)
|
||||
}
|
||||
if args.MetricsUtilization.Source == api.PrometheusMetrics && (args.MetricsUtilization.Prometheus == nil || args.MetricsUtilization.Prometheus.Query == "") {
|
||||
return fmt.Errorf("prometheus query is required when metrics source is set to %q", api.PrometheusMetrics)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -21,164 +21,374 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
|
||||
extendedResource := v1.ResourceName("example.com/foo")
|
||||
tests := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
targetThresholds api.ResourceThresholds
|
||||
errInfo error
|
||||
name string
|
||||
args *LowNodeUtilizationArgs
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing invalid thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
|
||||
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different num of resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
v1.ResourcePods: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourcePods: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' CPU config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 90,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 90,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
|
||||
},
|
||||
{
|
||||
name: "only thresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "only targetThresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different extended resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
"example.com/bar": 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
"example.com/bar": 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' extended resource config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 90,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 20,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 90,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 20,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource),
|
||||
},
|
||||
{
|
||||
name: "passing valid plugin config",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing valid plugin config with extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "setting both kubernetes metrics source and metricsserver",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
MetricsServer: true,
|
||||
Source: api.KubernetesMetrics,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("it is not allowed to set both \"KubernetesMetrics\" source and metricsServer"),
|
||||
},
|
||||
{
|
||||
name: "missing prometheus query",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
Source: api.PrometheusMetrics,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("prometheus query is required when metrics source is set to \"Prometheus\""),
|
||||
},
|
||||
{
|
||||
name: "prometheus set when source set to kubernetes metrics",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
Source: api.KubernetesMetrics,
|
||||
Prometheus: &Prometheus{},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("prometheus configuration is not allowed to set when source is set to \"KubernetesMetrics\""),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
args := &LowNodeUtilizationArgs{
|
||||
Thresholds: testCase.thresholds,
|
||||
TargetThresholds: testCase.targetThresholds,
|
||||
}
|
||||
validateErr := validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, false)
|
||||
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
validateErr := ValidateLowNodeUtilizationArgs(runtime.Object(testCase.args))
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateHighNodeUtilizationPluginConfig(t *testing.T) {
	extendedResource := v1.ResourceName("example.com/foo")

	tests := []struct {
		name    string
		args    *HighNodeUtilizationArgs
		wantErr bool
		errMsg  string
	}{
		{
			name: "valid configuration with CPU and memory",
			args: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU:    80,
					v1.ResourceMemory: 90,
				},
				EvictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
			},
			wantErr: false,
		},
		{
			name: "valid configuration with extended resource",
			args: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU:   85,
					extendedResource: 95,
				},
				EvictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
			},
			wantErr: false,
		},
		{
			name: "empty thresholds",
			args: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{},
			},
			wantErr: true,
			errMsg:  "no resource threshold is configured",
		},
		{
			name: "threshold below minimum (0%)",
			args: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU: -1,
				},
			},
			wantErr: true,
			errMsg:  "cpu threshold not in [0, 100] range",
		},
		{
			name: "threshold above maximum (100%)",
			args: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceMemory: 101,
				},
			},
			wantErr: true,
			errMsg:  "memory threshold not in [0, 100] range",
		},
		{
			name: "multiple thresholds with one out of range",
			args: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU:    50,
					v1.ResourceMemory: 150,
				},
			},
			wantErr: true,
			errMsg:  "memory threshold not in [0, 100] range",
		},
		{
			name: "evictableNamespaces with Exclude (allowed)",
			args: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU: 80,
				},
				EvictableNamespaces: &api.Namespaces{
					Exclude: []string{"ns1", "ns2"},
				},
			},
			wantErr: false,
		},
		{
			name: "invalid eviction mode",
			args: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU: 80,
				},
				EvictionModes: []EvictionMode{"InvalidMode"},
			},
			wantErr: true,
			errMsg:  "invalid eviction mode InvalidMode",
		},
		{
			name: "missing eviction modes (nil) - should be allowed (treated as empty)",
			args: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU: 80,
				},
				EvictionModes: nil,
			},
			wantErr: false,
		},
		{
			name: "empty eviction modes slice - should be allowed",
			args: &HighNodeUtilizationArgs{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU: 80,
				},
				EvictionModes: []EvictionMode{},
			},
			wantErr: false,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			err := ValidateHighNodeUtilizationArgs(runtime.Object(tc.args))

			if tc.wantErr {
				if err == nil {
					t.Fatalf("expected error, but got nil")
				}
				if tc.errMsg != "" && err.Error() != tc.errMsg {
					t.Errorf("expected error message: %q, but got: %q", tc.errMsg, err.Error())
				}
			} else {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
			}
		})
	}
}
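// Editorial sketch, not part of the upstream diff: the rules the table above
// encodes, written out explicitly for readability. This is not the upstream
// ValidateHighNodeUtilizationArgs implementation, only an illustration of the
// checks its test cases imply: at least one threshold, every threshold within
// [0, 100], and only recognized eviction modes.
func highNodeUtilizationRulesSketch(args *HighNodeUtilizationArgs) error {
	if len(args.Thresholds) == 0 {
		return fmt.Errorf("no resource threshold is configured")
	}
	for resourceName, percentage := range args.Thresholds {
		if percentage < 0 || percentage > 100 {
			return fmt.Errorf("%v threshold not in [0, 100] range", resourceName)
		}
	}
	for _, mode := range args.EvictionModes {
		if mode != EvictionModeOnlyThresholdingResources {
			return fmt.Errorf("invalid eviction mode %v", mode)
		}
	}
	return nil
}
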
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -37,6 +37,11 @@ func (in *HighNodeUtilizationArgs) DeepCopyInto(out *HighNodeUtilizationArgs) {
			(*out)[key] = val
		}
	}
	if in.EvictionModes != nil {
		in, out := &in.EvictionModes, &out.EvictionModes
		*out = make([]EvictionMode, len(*in))
		copy(*out, *in)
	}
	if in.EvictableNamespaces != nil {
		in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
		*out = new(api.Namespaces)
@@ -81,11 +86,21 @@ func (in *LowNodeUtilizationArgs) DeepCopyInto(out *LowNodeUtilizationArgs) {
			(*out)[key] = val
		}
	}
	if in.MetricsUtilization != nil {
		in, out := &in.MetricsUtilization, &out.MetricsUtilization
		*out = new(MetricsUtilization)
		(*in).DeepCopyInto(*out)
	}
	if in.EvictableNamespaces != nil {
		in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
		*out = new(api.Namespaces)
		(*in).DeepCopyInto(*out)
	}
	if in.EvictionLimits != nil {
		in, out := &in.EvictionLimits, &out.EvictionLimits
		*out = new(api.EvictionLimits)
		(*in).DeepCopyInto(*out)
	}
	return
}

@@ -106,3 +121,24 @@ func (in *LowNodeUtilizationArgs) DeepCopyObject() runtime.Object {
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsUtilization) DeepCopyInto(out *MetricsUtilization) {
	*out = *in
	if in.Prometheus != nil {
		in, out := &in.Prometheus, &out.Prometheus
		*out = new(Prometheus)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsUtilization.
func (in *MetricsUtilization) DeepCopy() *MetricsUtilization {
	if in == nil {
		return nil
	}
	out := new(MetricsUtilization)
	in.DeepCopyInto(out)
	return out
}
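// Editorial sketch, not part of the generated file: DeepCopy is expected to
// produce a fully independent copy, so mutating the clone's nested Prometheus
// configuration must not leak back into the original. The Query field is an
// assumption taken from the validation error messages earlier in this diff.
func metricsUtilizationDeepCopySketch() bool {
	original := &MetricsUtilization{
		Source:     api.KubernetesMetrics,
		Prometheus: &Prometheus{Query: "original"},
	}
	clone := original.DeepCopy()
	clone.Prometheus.Query = "changed"
	// True when the Prometheus pointer was deep-copied rather than shared.
	return original.Prometheus.Query == "original"
}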