Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)

Compare commits: chart-0.1. ... deschedule (737 commits)
[Commit listing: 737 commits, 839c506c3c (newest) through 0006fb039d (oldest); the author, date, and message columns are empty in the mirror view.]
.github/ci/ct.yaml (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
+chart-dirs:
+  - charts
+helm-extra-args: "--timeout=5m"
+check-version-increment: false
+target-branch: master
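
This file configures the chart-testing (`ct`) CLI. The Helm workflow below points its `ct list-changed`, `ct lint`, and `ct install` steps at it via `--config=.github/ci/ct.yaml`.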

.github/workflows/helm.yaml (vendored, new file, 69 lines)
@@ -0,0 +1,69 @@
+name: Helm
+
+on:
+  push:
+    branches:
+      - master
+      - release-*
+    paths:
+      - 'charts/**'
+      - '.github/workflows/helm.yaml'
+      - '.github/ci/ct.yaml'
+  pull_request:
+    paths:
+      - 'charts/**'
+      - '.github/workflows/helm.yaml'
+      - '.github/ci/ct.yaml'
+
+jobs:
+  lint-and-test:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Set up Helm
+        uses: azure/setup-helm@v2.1
+        with:
+          version: v3.9.2
+
+      - uses: actions/setup-python@v3.1.2
+        with:
+          python-version: 3.7
+
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '1.19.3'
+
+      - name: Set up chart-testing
+        uses: helm/chart-testing-action@v2.2.1
+        with:
+          version: v3.7.0
+
+      - name: Run chart-testing (list-changed)
+        id: list-changed
+        run: |
+          changed=$(ct list-changed --config=.github/ci/ct.yaml)
+          if [[ -n "$changed" ]]; then
+            echo "::set-output name=changed::true"
+          fi
+
+      - name: Run chart-testing (lint)
+        run: ct lint --config=.github/ci/ct.yaml --validate-maintainers=false
+
+      # Need a multi node cluster so descheduler runs until evictions
+      - name: Create multi node Kind cluster
+        run: make kind-multi-node
+
+      # helm-extra-set-args only available after ct 3.6.0
+      - name: Run chart-testing (install)
+        run: ct install --config=.github/ci/ct.yaml --helm-extra-set-args='--set=kind=Deployment'
+
+      - name: E2E after chart install
+        env:
+          KUBERNETES_VERSION: "v1.26.0"
+          KIND_E2E: true
+          SKIP_INSTALL: true
+        run: make test-e2e
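
Note: the `::set-output` command in the list-changed step has since been deprecated by GitHub Actions in favor of appending `name=value` lines to the file named by `$GITHUB_OUTPUT`; the step's logic is otherwise unaffected.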

.github/workflows/release.yaml (vendored, 19 changed lines)
@@ -2,8 +2,8 @@ name: Release Charts
 
 on:
   push:
-    tags:
-      - chart-*
+    branches:
+      - release-*
 
 jobs:
   release:
@@ -11,20 +11,21 @@ jobs:
     steps:
       - name: Checkout
        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
 
       - name: Configure Git
         run: |
           git config user.name "$GITHUB_ACTOR"
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
 
-      - name: Fetch history
-        run: git fetch --prune --unshallow
-
-      - name: Add dependency chart repos
-        run: |
-          helm repo add stable https://kubernetes-charts.storage.googleapis.com/
+      - name: Install Helm
+        uses: azure/setup-helm@v1
+        with:
+          version: v3.4.0
 
       - name: Run chart-releaser
-        uses: helm/chart-releaser-action@v1.0.0-rc.2
+        uses: helm/chart-releaser-action@v1.1.0
         env:
           CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+          CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"

.github/workflows/security.yaml (vendored, new file, 47 lines)
@@ -0,0 +1,47 @@
+name: "Security"
+
+on:
+  push:
+    branches:
+      - main
+      - master
+      - release-*
+  schedule:
+    - cron: '30 1 * * 0'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    strategy:
+      fail-fast: false
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Build image
+        run: |
+          IMAGE_REPO=${HELM_IMAGE_REPO:-descheduler}
+          IMAGE_TAG=${HELM_IMAGE_TAG:-security-test}
+          VERSION=security-test make image
+      - name: Run Trivy vulnerability scanner
+        uses: aquasecurity/trivy-action@master
+        with:
+          image-ref: 'descheduler:security-test'
+          format: 'sarif'
+          exit-code: '0'
+          severity: 'CRITICAL,HIGH'
+          output: 'trivy-results.sarif'
+
+      - name: Upload Trivy scan results to GitHub Security tab
+        uses: github/codeql-action/upload-sarif@v2
+        with:
+          sarif_file: 'trivy-results.sarif'
+          exit-code: '0'
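
Note: because the Trivy step sets `exit-code: '0'`, CRITICAL/HIGH findings do not fail the job; they are only reported through the SARIF file uploaded to the repository's Security tab.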

.gitignore (vendored, 3 lines added)
@@ -3,3 +3,6 @@ _tmp/
 vendordiff.patch
 .idea/
 *.code-workspace
+.vscode/
+kind
+bin/

.golangci.yml
@@ -5,11 +5,14 @@ linters:
   disable-all: true
   enable:
     - gofmt
+    - gofumpt
     - gosimple
     - gocyclo
     - misspell
     - govet
 
 linters-settings:
+  gofumpt:
+    extra-rules: true
   goimports:
     local-prefixes: sigs.k8s.io/descheduler

Dockerfile (10 changed lines)
@@ -11,15 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM golang:1.14.4
+FROM golang:1.19.3
 
 WORKDIR /go/src/sigs.k8s.io/descheduler
 COPY . .
-RUN make
+ARG ARCH
+ARG VERSION
+RUN VERSION=${VERSION} make build.$ARCH
 
 FROM scratch
 
-MAINTAINER Avesh Agarwal <avesh.ncsu@gmail.com>
+MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
+
+USER 1000
 
 COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler

Dockerfile.dev
@@ -13,7 +13,9 @@
 # limitations under the License.
 FROM scratch
 
-MAINTAINER Avesh Agarwal <avesh.ncsu@gmail.com>
+MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
+
+USER 1000
 
 COPY _output/bin/descheduler /bin/descheduler

Makefile (107 changed lines)
@@ -14,16 +14,23 @@
 
 .PHONY: test
 
-# VERSION is currently based on the last commit
-VERSION?=$(shell git describe --tags)
-COMMIT=$(shell git rev-parse HEAD)
+export CONTAINER_ENGINE ?= docker
+
+# VERSION is based on a date stamp plus the last commit
+VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags)
+BRANCH?=$(shell git branch --show-current)
+SHA1?=$(shell git rev-parse HEAD)
 BUILD=$(shell date +%FT%T%z)
-LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app
+LDFLAG_LOCATION=sigs.k8s.io/descheduler/pkg/version
+
+ARCHS = amd64 arm arm64
 
-LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"
+LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
 
-GOLANGCI_VERSION := v1.15.0
-HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint)
+GOLANGCI_VERSION := v1.49.0
+HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
+
+GOFUMPT_VERSION := v0.4.0
+HAS_GOFUMPT := $(shell command -v gofumpt 2> /dev/null)
 
 # REGISTRY is the container registry to push
 # into. The default is to push to the staging
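
The new `-X` flags above assume package-level string variables in `sigs.k8s.io/descheduler/pkg/version` whose names match the `.version`, `.buildDate`, `.gitbranch`, and `.gitsha1` suffixes. A minimal sketch of such a package (the comments and example values are assumptions, not the repository's actual file):

```go
// Package version holds build metadata that the Makefile injects at link
// time via: -ldflags "-X sigs.k8s.io/descheduler/pkg/version.version=..."
package version

// Each variable name must match the suffix used in the Makefile's -X flags;
// the Go linker overwrites these zero values during `make build`.
var (
	version   string // e.g. "v20230126-v0.26.0-12-gdeadbee" (assumed format, per VERSION above)
	buildDate string // output of `date +%FT%T%z` at build time
	gitbranch string // current branch, from `git branch --show-current`
	gitsha1   string // full commit hash, from `git rev-parse HEAD`
)
```

Note that `-X` only works on package-level string variables, which is why the location moved out of `cmd/descheduler/app` into a dedicated version package.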
@@ -41,30 +48,65 @@ IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
 # In the future binaries can be uploaded to
 # GCS bucket gs://k8s-staging-descheduler.
 
-HAS_HELM := $(shell which helm)
+HAS_HELM := $(shell which helm 2> /dev/null)
 
 all: build
 
 build:
 	CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
 
+build.amd64:
+	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
+
+build.arm:
+	CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
+
+build.arm64:
+	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
+
 dev-image: build
-	docker build -f Dockerfile.dev -t $(IMAGE) .
+	$(CONTAINER_ENGINE) build -f Dockerfile.dev -t $(IMAGE) .
 
 image:
-	docker build -t $(IMAGE) .
+	$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .
 
-push-container-to-gcloud: image
+image.amd64:
+	$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .
+
+image.arm:
+	$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm" -t $(IMAGE)-arm .
+
+image.arm64:
+	$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .
+
+push: image
 	gcloud auth configure-docker
-	docker tag $(IMAGE) $(IMAGE_GCLOUD)
-	docker push $(IMAGE_GCLOUD)
+	$(CONTAINER_ENGINE) tag $(IMAGE) $(IMAGE_GCLOUD)
+	$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)
 
-push: push-container-to-gcloud
+push-all: image.amd64 image.arm image.arm64
+	gcloud auth configure-docker
+	for arch in $(ARCHS); do \
+		$(CONTAINER_ENGINE) tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
+		$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)-$${arch} ;\
+	done
+	DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
+	for arch in $(ARCHS); do \
+		DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
+	done
+	DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest push $(IMAGE_GCLOUD) ;\
 
 clean:
 	rm -rf _output
+	rm -rf _tmp
 
-verify: verify-gofmt verify-vendor lint lint-chart
+verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-toc verify-gen
+
+verify-govet:
+	./hack/verify-govet.sh
+
+verify-spelling:
+	./hack/verify-spelling.sh
 
 verify-gofmt:
 	./hack/verify-gofmt.sh
@@ -72,6 +114,9 @@ verify-gofmt:
 verify-vendor:
 	./hack/verify-vendor.sh
+
+verify-toc:
+	./hack/verify-toc.sh
 
 test-unit:
 	./test/run-unit-tests.sh
@@ -82,15 +127,43 @@ gen:
 	./hack/update-generated-conversions.sh
 	./hack/update-generated-deep-copies.sh
 	./hack/update-generated-defaulters.sh
+	./hack/update-toc.sh
+
+verify-gen:
+	./hack/verify-conversions.sh
+	./hack/verify-deep-copies.sh
+	./hack/verify-defaulters.sh
 
 lint:
 ifndef HAS_GOLANGCI
-	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
+	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
 endif
 	./_output/bin/golangci-lint run
 
-lint-chart:
+fmt:
+ifndef HAS_GOFUMPT
+	go install mvdan.cc/gofumpt@${GOFUMPT_VERSION}
+endif
+	gofumpt -w -extra .
+
+# helm
+
+ensure-helm-install:
 ifndef HAS_HELM
 	curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
 endif
+
+lint-chart: ensure-helm-install
 	helm lint ./charts/descheduler
+
+build-helm:
+	helm package ./charts/descheduler --dependency-update --destination ./bin/chart
+
+test-helm: ensure-helm-install
+	./test/run-helm-tests.sh
+
+kind-multi-node:
+	kind create cluster --name kind --config ./hack/kind_config.yaml --wait 2m
+
+ct-helm:
+	./hack/verify-chart.sh
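
Taken together, `make push-all` builds the three per-arch images, tags and pushes each one to the gcloud registry, then assembles and pushes a single multi-arch manifest list for `$(IMAGE_GCLOUD)` via `manifest create`, `manifest annotate`, and `manifest push`.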

OWNERS (17 changed lines)
@@ -1,12 +1,17 @@
 approvers:
-- aveshagarwal
-- k82cn
-- ravisantoshgudimetla
 - damemi
+- ingvagabund
+- seanmalloy
+- a7i
 reviewers:
-- aveshagarwal
-- k82cn
-- ravisantoshgudimetla
 - damemi
 - seanmalloy
 - ingvagabund
+- lixiang233
+- a7i
+- janeliul
+- knelasevero
+emeritus_approvers:
+- aveshagarwal
+- k82cn
+- ravisantoshgudimetla

README.md (706 changed lines)
@@ -1,9 +1,12 @@
 [](https://goreportcard.com/report/sigs.k8s.io/descheduler)
+
+
+<p align="center">
+    <img src="assets/logo/descheduler-stacked-color.png" width="40%" align="center" alt="descheduler">
+</p>
 
 # Descheduler for Kubernetes
 
-## Introduction
-
 Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
 a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
 pod can or can not be scheduled, are guided by its configurable policy which comprises of set of
@@ -23,9 +26,47 @@ Descheduler, based on its policy, finds pods that can be moved and evicts them.
 note, in current implementation, descheduler does not schedule replacement of evicted pods
 but relies on the default scheduler for that.
 
+Table of Contents
+=================
+<!-- toc -->
+- [Quick Start](#quick-start)
+  - [Run As A Job](#run-as-a-job)
+  - [Run As A CronJob](#run-as-a-cronjob)
+  - [Run As A Deployment](#run-as-a-deployment)
+  - [Install Using Helm](#install-using-helm)
+  - [Install Using Kustomize](#install-using-kustomize)
+- [User Guide](#user-guide)
+- [Policy and Strategies](#policy-and-strategies)
+  - [RemoveDuplicates](#removeduplicates)
+  - [LowNodeUtilization](#lownodeutilization)
+  - [HighNodeUtilization](#highnodeutilization)
+  - [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
+  - [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
+  - [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
+  - [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
+  - [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
+  - [PodLifeTime](#podlifetime)
+  - [RemoveFailedPods](#removefailedpods)
+- [Filter Pods](#filter-pods)
+  - [Namespace filtering](#namespace-filtering)
+  - [Priority filtering](#priority-filtering)
+  - [Label filtering](#label-filtering)
+  - [Node Fit filtering](#node-fit-filtering)
+- [Pod Evictions](#pod-evictions)
+  - [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
+- [High Availability](#high-availability)
+  - [Configure HA Mode](#configure-ha-mode)
+- [Metrics](#metrics)
+- [Compatibility Matrix](#compatibility-matrix)
+- [Getting Involved and Contributing](#getting-involved-and-contributing)
+  - [Communicating With Contributors](#communicating-with-contributors)
+- [Roadmap](#roadmap)
+- [Code of conduct](#code-of-conduct)
+<!-- /toc -->
+
 ## Quick Start
 
-The descheduler can be run as a Job or CronJob inside of a k8s cluster. It has the
+The descheduler can be run as a `Job`, `CronJob`, or `Deployment` inside of a k8s cluster. It has the
 advantage of being able to be run multiple times without needing user intervention.
 The descheduler pod is run as a critical pod in the `kube-system` namespace to avoid
 being evicted by itself or by the kubelet.
@@ -33,17 +74,52 @@ being evicted by itself or by the kubelet.
 ### Run As A Job
 
 ```
-kubectl create -f kubernetes/rbac.yaml
-kubectl create -f kubernetes/configmap.yaml
-kubectl create -f kubernetes/job.yaml
+kubectl create -f kubernetes/base/rbac.yaml
+kubectl create -f kubernetes/base/configmap.yaml
+kubectl create -f kubernetes/job/job.yaml
 ```
 
 ### Run As A CronJob
 
 ```
-kubectl create -f kubernetes/rbac.yaml
-kubectl create -f kubernetes/configmap.yaml
-kubectl create -f kubernetes/cronjob.yaml
+kubectl create -f kubernetes/base/rbac.yaml
+kubectl create -f kubernetes/base/configmap.yaml
+kubectl create -f kubernetes/cronjob/cronjob.yaml
+```
+
+### Run As A Deployment
+
+```
+kubectl create -f kubernetes/base/rbac.yaml
+kubectl create -f kubernetes/base/configmap.yaml
+kubectl create -f kubernetes/deployment/deployment.yaml
+```
+
+### Install Using Helm
+
+Starting with release v0.18.0 there is an official helm chart that can be used to install the
+descheduler. See the [helm chart README](https://github.com/kubernetes-sigs/descheduler/blob/master/charts/descheduler/README.md) for detailed instructions.
+
+The descheduler helm chart is also listed on the [artifact hub](https://artifacthub.io/packages/helm/descheduler/descheduler).
+
+### Install Using Kustomize
+
+You can use kustomize to install descheduler.
+See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/cmd/build/) for detailed instructions.
+
+Run As A Job
+```
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.26.1' | kubectl apply -f -
+```
+
+Run As A CronJob
+```
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.26.1' | kubectl apply -f -
+```
+
+Run As A Deployment
+```
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.26.1' | kubectl apply -f -
 ```
 
 ## User Guide
@@ -52,25 +128,68 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.
 
 ## Policy and Strategies
 
-Descheduler's policy is configurable and includes strategies that can be enabled or disabled.
-Seven strategies `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`,
-`RemovePodsViolatingNodeAffinity`, `RemovePodsViolatingNodeTaints`, `RemovePodsHavingTooManyRestarts`, and `PodLifeTime`
-are currently implemented. As part of the policy, the parameters associated with the strategies can be configured too.
-By default, all strategies are enabled.
+Descheduler's policy is configurable and includes strategies that can be enabled or disabled. By default, all strategies are enabled.
+
+The policy includes a common configuration that applies to all the strategies:
+
+| Name | Default Value | Description |
+|------|---------------|-------------|
+| `nodeSelector` | `nil` | limiting the nodes which are processed |
+| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
+| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
+| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
+| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
+| `maxNoOfPodsToEvictPerNamespace` | `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
+| `evictFailedBarePods` | `false` | allow eviction of pods without owner references and in failed phase |
+
+As part of the policy, the parameters associated with each strategy can be configured.
+See each strategy for details on available parameters.
+
+**Policy:**
+
+```yaml
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+nodeSelector: prod=dev
+evictFailedBarePods: false
+evictLocalStoragePods: true
+evictSystemCriticalPods: true
+maxNoOfPodsToEvictPerNode: 40
+ignorePvcPods: false
+strategies:
+  ...
+```
+
+The following diagram provides a visualization of most of the strategies to help
+categorize how strategies fit together.
+
+
 
 ### RemoveDuplicates
 
-This strategy makes sure that there is only one pod associated with a Replica Set (RS),
-Replication Controller (RC), Deployment, or Job running on the same node. If there are more,
+This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
+ReplicationController (RC), StatefulSet, or Job running on the same node. If there are more,
 those duplicate pods are evicted for better spreading of pods in a cluster. This issue could happen
 if some nodes went down due to whatever reasons, and pods on them were moved to other nodes leading to
 more than one pod associated with a RS or RC, for example, running on the same node. Once the failed nodes
 are ready again, this strategy could be enabled to evict those duplicate pods.
 
-It provides one optional parameter, `ExcludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
-has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
+It provides one optional parameter, `excludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
+has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction. Note that
+pods created by Deployments are considered for eviction by this strategy. The `excludeOwnerKinds` parameter
+should include `ReplicaSet` to have pods created by Deployments excluded.
 
-```
+**Parameters:**
+
+|Name|Type|
+|---|---|
+|`excludeOwnerKinds`|list(string)|
+|`namespaces`|(see [namespace filtering](#namespace-filtering))|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
+
+**Example:**
+```yaml
 apiVersion: "descheduler/v1alpha1"
 kind: "DeschedulerPolicy"
 strategies:
@@ -89,20 +208,50 @@ in the hope that recreation of evicted pods will be scheduled on these underutil
 parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.
 
 The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
-`thresholds` can be configured for cpu, memory, and number of pods in terms of percentage. If a node's
-usage is below threshold for all (cpu, memory, and number of pods), the node is considered underutilized.
+`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage (the percentage is
+calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
+For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node).
+
+If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
 Currently, pods request resource requirements are considered for computing node resource utilization.
 
 There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
-from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, or number of pods),
+from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, number of pods, or extended resources),
 the node is considered over utilized. Any node between the thresholds, `thresholds` and `targetThresholds` is
 considered appropriately utilized and is not considered for eviction. The threshold, `targetThresholds`,
 can be configured for cpu, memory, and number of pods too in terms of percentage.
 
-These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements.
-Here is an example of a policy for this strategy:
+These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements. Note that this
+strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
+(those with usage below `thresholds`), it will abort if any number of `underutilized nodes` or `overutilized nodes` is zero.
 
-```
+Additionally, the strategy accepts a `useDeviationThresholds` parameter.
+If that parameter is set to `true`, the thresholds are considered as percentage deviations from mean resource usage.
+`thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
+A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).
+
+**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
+This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
+design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
+like `kubectl top`) may differ from the calculated consumption, due to these components reporting
+actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
+
+**Parameters:**
+
+|Name|Type|
+|---|---|
+|`thresholds`|map(string:int)|
+|`targetThresholds`|map(string:int)|
+|`numberOfNodes`|int|
+|`useDeviationThresholds`|bool|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
+|`Namespaces`|(see [namespace filtering](#namespace-filtering))|
+
+**Example:**
+
+```yaml
 apiVersion: "descheduler/v1alpha1"
 kind: "DeschedulerPolicy"
 strategies:
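
To make the deviation window above concrete (illustrative numbers, not from the README): with `useDeviationThresholds: true`, a mean cpu request utilization of 50% across nodes, `thresholds` cpu of 10, and `targetThresholds` cpu of 20, the acceptable window becomes 40% to 70%; nodes requesting under 40% of allocatable cpu count as underutilized and nodes above 70% as overutilized.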
@@ -121,34 +270,111 @@ strategies:
|
|||||||
```
|
```
|
||||||
|
|
||||||
Policy should pass the following validation checks:
|
Policy should pass the following validation checks:
|
||||||
* Only three types of resources are supported: `cpu`, `memory` and `pods`.
|
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`.
|
||||||
|
If any of these resource types is not specified, all its thresholds default to 100% to avoid nodes going from underutilized to overutilized.
|
||||||
|
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional,
|
||||||
|
and will not be used to compute node's usage if it's not specified in `thresholds` and `targetThresholds` explicitly.
|
||||||
* `thresholds` or `targetThresholds` can not be nil and they must configure exactly the same types of resources.
|
* `thresholds` or `targetThresholds` can not be nil and they must configure exactly the same types of resources.
|
||||||
* The valid range of the resource's percentage value is \[0, 100\]
|
* The valid range of the resource's percentage value is \[0, 100\]
|
||||||
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.
|
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.
|
||||||
|
|
||||||
If any of the resource types is not specified, all its thresholds default to 100% to avoid nodes going
|
|
||||||
from underutilized to overutilized.
|
|
||||||
|
|
||||||
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
|
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
|
||||||
This parameter can be configured to activate the strategy only when the number of under utilized nodes
|
This parameter can be configured to activate the strategy only when the number of under utilized nodes
|
||||||
are above the configured value. This could be helpful in large clusters where a few nodes could go
|
are above the configured value. This could be helpful in large clusters where a few nodes could go
|
||||||
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
|
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
|
||||||
|
|
||||||
|
### HighNodeUtilization
|
||||||
|
|
||||||
|
This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
|
||||||
|
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
|
||||||
|
trigger down scaling of under utilized nodes.
|
||||||
|
This strategy **must** be used with the scheduler scoring strategy `MostAllocated`. The parameters of this strategy are
|
||||||
|
configured under `nodeResourceUtilizationThresholds`.
|
||||||
|
|
||||||
|
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
|
||||||
|
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
|
||||||
|
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
|
||||||
|
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node.
|
||||||
|
|
||||||
|
If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
|
||||||
|
Currently, pods request resource requirements are considered for computing node resource utilization.
|
||||||
|
Any node above `thresholds` is considered appropriately utilized and is not considered for eviction.
|
||||||
|
|
||||||
|
The `thresholds` param could be tuned as per your cluster requirements. Note that this
|
||||||
|
strategy evicts pods from `underutilized nodes` (those with usage below `thresholds`)
|
||||||
|
so that they can be recreated in appropriately utilized nodes.
|
||||||
|
The strategy will abort if any number of `underutilized nodes` or `appropriately utilized nodes` is zero.
|
||||||
|
|
||||||
|
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
|
||||||
|
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
|
||||||
|
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
|
||||||
|
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
|
||||||
|
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
|
||||||
|
|Name|Type|
|
||||||
|
|---|---|
|
||||||
|
|`thresholds`|map(string:int)|
|
||||||
|
|`numberOfNodes`|int|
|
||||||
|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||||
|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||||
|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|
||||||
|
|`Namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: "descheduler/v1alpha1"
|
||||||
|
kind: "DeschedulerPolicy"
|
||||||
|
strategies:
|
||||||
|
"HighNodeUtilization":
|
||||||
|
enabled: true
|
||||||
|
params:
|
||||||
|
nodeResourceUtilizationThresholds:
|
||||||
|
thresholds:
|
||||||
|
"cpu" : 20
|
||||||
|
"memory": 20
|
||||||
|
"pods": 20
|
||||||
|
```
|
||||||
|
|
||||||
|
Policy should pass the following validation checks:
|
||||||
|
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`. If any of these resource types is not specified, all its thresholds default to 100%.
|
||||||
|
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional, and will not be used to compute node's usage if it's not specified in `thresholds` explicitly.
|
||||||
|
* `thresholds` can not be nil.
|
||||||
|
* The valid range of the resource's percentage value is \[0, 100\]
|
||||||
|
|
||||||
|
There is another parameter associated with the `HighNodeUtilization` strategy, called `numberOfNodes`.
|
||||||
|
This parameter can be configured to activate the strategy only when the number of under utilized nodes
|
||||||
|
is above the configured value. This could be helpful in large clusters where a few nodes could go
|
||||||
|
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
|
||||||
|
|
||||||
### RemovePodsViolatingInterPodAntiAffinity
|
### RemovePodsViolatingInterPodAntiAffinity
|
||||||
|
|
||||||
This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example,
|
This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example,
|
||||||
if there is podA on a node and podB and podC (running on the same node) have anti-affinity rules which prohibit
|
if there is podA on a node and podB and podC (running on the same node) have anti-affinity rules which prohibit
|
||||||
them to run on the same node, then podA will be evicted from the node so that podB and podC could run. This
|
them to run on the same node, then podA will be evicted from the node so that podB and podC could run. This
|
||||||
issue could happen, when the anti-affinity rules for podB and podC are created when they are already running on
|
issue could happen, when the anti-affinity rules for podB and podC are created when they are already running on
|
||||||
node. Currently, there are no parameters associated with this strategy. To disable this strategy, the
|
node.
|
||||||
policy should look like:
|
|
||||||
|
|
||||||
```
|
**Parameters:**
|
||||||
|
|
||||||
|
|Name|Type|
|
||||||
|
|---|---|
|
||||||
|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||||
|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||||
|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||||
|
|`labelSelector`|(see [label filtering](#label-filtering))|
|
||||||
|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|
||||||
|
|
||||||
|
**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: true
```

### RemovePodsViolatingNodeAffinity

@@ -169,9 +395,20 @@
of scheduling. Over time nodeA stops satisfying the rule. When the strategy gets
executed and there is another node available that satisfies the node affinity rule,
podA gets evicted from nodeA.

**Parameters:**

|Name|Type|
|---|---|
|`nodeAffinityType`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
```

@@ -187,21 +424,97 @@

This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example, there is a
pod "podA" with a toleration for the taint ``key=value:NoSchedule``, scheduled and running on the tainted
node. If the node's taint is subsequently updated or removed, the taint is no longer matched by the pod's
tolerations, and the pod will be evicted.

Node taints can be excluded from consideration by specifying a list of `excludedTaints`. If a node taint key **or**
key=value matches an `excludedTaints` entry, the taint is ignored.

For example, the `excludedTaints` entry "dedicated" would match all taints with key "dedicated", regardless of value,
while the entry "dedicated=special-user" would match only taints with key "dedicated" and value "special-user".

**Parameters:**

|Name|Type|
|---|---|
|`excludedTaints`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

````yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
    params:
      excludedTaints:
      - dedicated=special-user # exclude taints with key "dedicated" and value "special-user"
      - reserved # exclude all taints with key "reserved"
````

### RemovePodsViolatingTopologySpreadConstraint

This strategy makes sure that pods violating [topology spread constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.

By default, this strategy only deals with hard constraints; setting the parameter `includeSoftConstraints` to `true` will
include soft constraints as well.

The strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine whether the pod can be evicted. A sketch of the kind of constraint this strategy enforces is shown after the parameters table below.

**Parameters:**

|Name|Type|
|---|---|
|`includeSoftConstraints`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
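For context, a minimal sketch of the kind of constraint this strategy enforces; this is a standard Kubernetes
pod spec field, not descheduler configuration, and the zone topology key, `foo: bar` labels, and image are
illustrative:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example          # illustrative name
  labels:
    foo: bar             # counted against the constraint below
spec:
  topologySpreadConstraints:
    - maxSkew: 1                                 # the imbalance this strategy restores
      topologyKey: topology.kubernetes.io/zone   # illustrative topology domain
      whenUnsatisfiable: DoNotSchedule           # hard constraint; ScheduleAnyway would be soft
      labelSelector:
        matchLabels:
          foo: bar
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9
```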
**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingTopologySpreadConstraint":
    enabled: true
    params:
      includeSoftConstraints: false
```

### RemovePodsHavingTooManyRestarts

This strategy makes sure that pods having too many restarts are removed from nodes. For example, a pod with an EBS/PD volume that
can't get the volume/disk attached to the instance should be re-scheduled to other nodes. Its parameters
include `podRestartThreshold`, which is the number of restarts (summed over all eligible containers) at which a pod
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
into that calculation.

**Parameters:**

|Name|Type|
|---|---|
|`podRestartThreshold`|int|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
```
@@ -215,33 +528,280 @@
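The diff elides the remainder of the example above. Judging from the chart's default values further down in this
document (`podsHavingTooManyRestarts` under `params`), a minimal sketch of the full strategy block might look like:

```yaml
strategies:
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        podRestartThreshold: 100       # evict once total restarts reach the threshold
        includingInitContainers: true  # count init container restarts too
```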
### PodLifeTime

This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.

You can also specify the `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`

If a value for `states` or `podStatusPhases` is not specified,
pods in any state (even `Running`) are considered for eviction.

**Parameters:**

|Name|Type|Notes|
|---|---|---|
|`maxPodLifeTimeSeconds`|int||
|`podStatusPhases`|list(string)|Deprecated in v0.25+. Use `states` instead|
|`states`|list(string)|Only supported in v0.25+|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))||
|`namespaces`|(see [namespace filtering](#namespace-filtering))||
|`labelSelector`|(see [label filtering](#label-filtering))||

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
        states:
        - "Pending"
        - "PodInitializing"
```

### RemoveFailedPods

This strategy evicts pods that are in the failed status phase.
You can provide an optional parameter to filter by failed `reasons`.
`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
You can specify an optional parameter `minPodLifetimeSeconds` to evict only pods older than the specified number of seconds.
Lastly, you can specify the optional parameter `excludeOwnerKinds`; if a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.

**Parameters:**

|Name|Type|
|---|---|
|`minPodLifetimeSeconds`|uint|
|`excludeOwnerKinds`|list(string)|
|`reasons`|list(string)|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveFailedPods":
    enabled: true
    params:
      failedPods:
        reasons:
        - "NodeAffinity"
        includingInitContainers: true
        excludeOwnerKinds:
        - "Job"
        minPodLifetimeSeconds: 3600
```

## Filter Pods

### Namespace filtering

The following strategies accept a `namespaces` parameter, which allows specifying a list of namespaces to include or, alternatively, to exclude:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemoveDuplicates`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
* `LowNodeUtilization` and `HighNodeUtilization` (only filtered right before eviction)

For example:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      namespaces:
        include:
        - "namespace1"
        - "namespace2"
```

In the example above, `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
The same holds for the `exclude` field:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      namespaces:
        exclude:
        - "namespace1"
        - "namespace2"
```

The strategy gets executed over all namespaces but `namespace1` and `namespace2`.

It is not allowed to combine the `include` and `exclude` fields.

### Priority filtering

All strategies are able to configure a priority threshold; only pods under the threshold can be evicted. You can
specify this threshold by setting `thresholdPriorityClassName` (setting the threshold to the value of the given
priority class) or `thresholdPriority` (directly setting the threshold). By default, this threshold
is set to the value of the `system-cluster-critical` priority class.

Note: Setting `evictSystemCriticalPods` to true disables priority filtering entirely.

E.g.

Setting `thresholdPriority`:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      thresholdPriority: 10000
```

Setting `thresholdPriorityClassName`:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      thresholdPriorityClassName: "priorityclass1"
```

Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`. If the given priority class
does not exist, the descheduler won't create it and will throw an error.
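For reference, a minimal sketch of a PriorityClass that `thresholdPriorityClassName` could reference; the name
and value are illustrative, and the class must already exist before the descheduler runs:

```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: priorityclass1   # referenced by thresholdPriorityClassName above
value: 10000             # pods below this priority remain evictable
globalDefault: false
description: "Eviction threshold for the descheduler."
```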
### Label filtering

The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#labelselector-v1-meta)
to filter pods by their labels:

* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`

This restricts a strategy to only the pods the descheduler is interested in.

For example:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      labelSelector:
        matchLabels:
          component: redis
        matchExpressions:
        - {key: tier, operator: In, values: [cache]}
        - {key: environment, operator: NotIn, values: [dev]}
```

### Node Fit filtering

The following strategies accept a `nodeFit` boolean parameter which can optimize descheduling:
* `RemoveDuplicates`
* `LowNodeUtilization`
* `HighNodeUtilization`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemovePodsHavingTooManyRestarts`
* `RemoveFailedPods`

If set to `true`, the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently, the following criteria are considered when setting `nodeFit` to `true`:
- A `nodeSelector` on the pod
- Any `tolerations` on the pod and any `taints` on the other nodes
- `nodeAffinity` on the pod
- Resource `requests` made by the pod and the resources available on other nodes
- Whether any of the other nodes are marked as `unschedulable`

E.g.

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeFit: true
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50
```

Note that node fit filtering references the current pod spec, and not that of its owner.
Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
This is expected behavior, as the descheduler is a "best-effort" mechanism.

Using Deployments instead of ReplicationControllers provides an automated rollout of pod spec changes, therefore ensuring that the descheduler has an up-to-date view of the cluster state.

## Pod Evictions

When the descheduler decides to evict pods from a node, it employs the following general mechanism:

* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
* Pods (static or mirrored pods or standalone pods) not part of a ReplicationController, ReplicaSet (Deployment), StatefulSet, or Job are
never evicted because these pods won't be recreated. (Standalone pods in failed status phase can be evicted by setting `evictFailedBarePods: true`.)
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have the same priority,
best effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction (see the sketch after this list). This
annotation is used to override checks which prevent eviction, so users can select which pods get evicted.
Users should know how and if the pod will be recreated.
The annotation only affects internal descheduler checks.
The anti-disruption protection provided by the [/eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/)
subresource is still respected.
* Pods with a non-nil DeletionTimestamp are not evicted by default.
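For illustration, a minimal sketch (pod name and image are placeholders) of opting a pod in via the annotation:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example                                       # illustrative name
  annotations:
    descheduler.alpha.kubernetes.io/evict: "true"     # override the internal eviction checks above
spec:
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9
```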
Setting `--v=4` or greater on the descheduler will log all reasons why any pod is not evictable.

@@ -250,6 +810,33 @@

Pods subject to a Pod Disruption Budget (PDB) are not evicted if descheduling would violate the PDB. The pods
are evicted by using the eviction subresource, which handles the PDB. A sketch of such a PDB follows.
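For reference, a minimal sketch (the app label and budget values are illustrative, not from this repo) of a PDB
that caps descheduler-driven disruption:

```yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: example-pdb        # illustrative name
spec:
  minAvailable: 2          # the eviction API refuses evictions that would drop below this
  selector:
    matchLabels:
      app: example
```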
## High Availability

In High Availability mode, the descheduler starts a [leader election](https://github.com/kubernetes/client-go/tree/master/tools/leaderelection) process in Kubernetes. You can activate HA mode
if you choose to deploy your application as a Deployment.

The Deployment starts with 1 replica by default. If you want to use more than 1 replica, you must
enable High Availability mode, since descheduler pods must not run simultaneously.

### Configure HA Mode

The leader election process can be enabled by setting `--leader-elect` in the CLI. You can also set the
`--set=leaderElection.enabled=true` flag if you are using Helm. A sketch of both is shown below.
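For illustration, assuming the chart defaults shown later in this document (binary at `/bin/descheduler`, chart
values `kind`, `replicas`, and `leaderElection.enabled`):

```shell
# CLI: enable leader election on the descheduler binary
/bin/descheduler --policy-config-file /policy-dir/policy.yaml \
  --descheduling-interval 5m --leader-elect

# Helm: run as a Deployment with two replicas and leader election enabled
helm install my-release --namespace kube-system descheduler/descheduler \
  --set kind=Deployment --set replicas=2 --set leaderElection.enabled=true
```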
To get the best results from HA mode, some additional configuration might be required:
* Configure a [podAntiAffinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node) rule if you want to schedule onto a node only if that node is in the same zone as at least one already-running descheduler
* Set the replica count to greater than 1

## Metrics

| name | type | description |
|------|------|-------------|
| build_info | gauge | constant 1 |
| pods_evicted | CounterVec | total number of pods evicted |

The metrics are served through https://localhost:10258/metrics by default.
The address and port can be changed by setting the `--binding-address` and `--secure-port` flags. For example:
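A quick sanity check of the endpoint, assuming default flags and the self-signed serving certificate (hence `-k`);
the `descheduler_` metric prefix matches the relabeling example in the chart's values.yaml below:

```shell
# -k skips verification of the self-signed serving certificate
curl -ks https://localhost:10258/metrics | grep descheduler_
```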
## Compatibility Matrix

The below compatibility matrix shows the k8s client package (client-go, apimachinery, etc.) versions that the descheduler
is compiled with. At this time the descheduler does not have a hard dependency on a specific k8s release. However, a

@@ -259,13 +846,20 @@
v0.18 should work with k8s v1.18, v1.17, and v1.16.
Starting with descheduler release v0.18, the minor version of the descheduler matches the minor version of the k8s client
packages that it is compiled with.

| Descheduler | Supported Kubernetes Version |
|-------------|------------------------------|
| v0.26 | v1.26 |
| v0.25 | v1.25 |
| v0.24 | v1.24 |
| v0.23 | v1.23 |
| v0.22 | v1.22 |
| v0.21 | v1.21 |
| v0.20 | v1.20 |
| v0.19 | v1.19 |
| v0.18 | v1.18 |
| v0.10 | v1.17 |
| v0.4-v0.9 | v1.9+ |
| v0.1-v0.3 | v1.7-v1.8 |

## Getting Involved and Contributing

BIN assets/logo/descheduler-stacked-color.png (Executable file)
Binary file not shown. (After: Size 41 KiB)
@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.26.1
appVersion: 0.26.1
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes

@@ -12,5 +12,5 @@ icon: https://kubernetes.io/images/favicon.png
sources:
- https://github.com/kubernetes-sigs/descheduler
maintainers:
  - name: Kubernetes SIG Scheduling
    email: kubernetes-sig-scheduling@googlegroups.com
@@ -6,12 +6,12 @@

```shell
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
helm install my-release --namespace kube-system descheduler/descheduler
```

## Introduction

This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.

## Prerequisites
@@ -22,7 +22,7 @@

To install the chart with the release name `my-release`:

```shell
helm install --namespace kube-system my-release descheduler/descheduler
```

The command deploys _descheduler_ on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
@@ -43,17 +43,46 @@ The command removes all the Kubernetes components associated with the chart and

The following table lists the configurable parameters of the _descheduler_ chart and their default values.

| Parameter | Description | Default |
|-----------|-------------|---------|
| `kind` | Use as CronJob or Deployment | `CronJob` |
| `image.repository` | Docker repository to use | `registry.k8s.io/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
| `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
| `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `tolerations` | Tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
| `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |
@@ -1 +1,12 @@

Descheduler installed as a {{ .Values.kind }}.

{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.replicas 1.0 }}
WARNING: You set replica count as 1 and workload kind as Deployment, but leaderElection is not enabled. Consider enabling leader election for HA mode.
{{- end }}
{{- if .Values.leaderElection }}
{{- if and (hasKey .Values.cmdOptions "dry-run") (eq (get .Values.cmdOptions "dry-run") true) }}
WARNING: You enabled DryRun mode; you can't use leader election.
{{- end }}
{{- end }}
{{- end }}
@@ -42,6 +42,17 @@ app.kubernetes.io/instance: {{ .Release.Name }}

app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end -}}

{{/*
Selector labels
*/}}
{{- define "descheduler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{/*
@@ -54,3 +65,30 @@ Create the name of the service account to use

{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

{{/*
Leader Election
*/}}
{{- define "descheduler.leaderElection"}}
{{- if .Values.leaderElection -}}
- --leader-elect={{ .Values.leaderElection.enabled }}
{{- if .Values.leaderElection.leaseDuration }}
- --leader-elect-lease-duration={{ .Values.leaderElection.leaseDuration }}
{{- end }}
{{- if .Values.leaderElection.renewDeadline }}
- --leader-elect-renew-deadline={{ .Values.leaderElection.renewDeadline }}
{{- end }}
{{- if .Values.leaderElection.retryPeriod }}
- --leader-elect-retry-period={{ .Values.leaderElection.retryPeriod }}
{{- end }}
{{- if .Values.leaderElection.resourceLock }}
- --leader-elect-resource-lock={{ .Values.leaderElection.resourceLock }}
{{- end }}
{{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }}
{{- if .Values.leaderElection.resourceNamescape }}
- --leader-elect-resource-namespace={{ .Values.leaderElection.resourceNamescape }}
{{- end -}}
{{- end }}
{{- end }}
@@ -6,16 +6,31 @@ metadata:

  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
rules:
  - apiGroups: ["events.k8s.io"]
    resources: ["events"]
    verbs: ["create", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "watch", "list"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "watch", "list"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "list", "delete"]
  - apiGroups: [""]
    resources: ["pods/eviction"]
    verbs: ["create"]
  - apiGroups: ["scheduling.k8s.io"]
    resources: ["priorityclasses"]
    verbs: ["get", "watch", "list"]
  {{- if .Values.leaderElection.enabled }}
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["create", "update"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
    verbs: ["get", "patch", "delete"]
  {{- end }}
{{- end -}}
@@ -2,6 +2,7 @@ apiVersion: v1

kind: ConfigMap
metadata:
  name: {{ template "descheduler.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
data:
@@ -1,14 +1,31 @@

{{- if eq .Values.kind "CronJob" }}
apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
  name: {{ template "descheduler.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
spec:
  schedule: {{ .Values.schedule | quote }}
  {{- if .Values.suspend }}
  suspend: {{ .Values.suspend }}
  {{- end }}
  concurrencyPolicy: "Forbid"
  {{- if .Values.startingDeadlineSeconds }}
  startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
  {{- end }}
  {{- if .Values.successfulJobsHistoryLimit }}
  successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
  {{- end }}
  {{- if .Values.failedJobsHistoryLimit }}
  failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
  {{- end }}
  jobTemplate:
    spec:
      {{- if .Values.ttlSecondsAfterFinished }}
      ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
      {{- end }}
      template:
        metadata:
          name: {{ template "descheduler.fullname" . }}
@@ -18,17 +35,32 @@

            {{- .Values.podAnnotations | toYaml | nindent 12 }}
            {{- end }}
          labels:
            {{- include "descheduler.selectorLabels" . | nindent 12 }}
            {{- if .Values.podLabels }}
            {{- .Values.podLabels | toYaml | nindent 12 }}
            {{- end }}
        spec:
          {{- with .Values.nodeSelector }}
          nodeSelector:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.affinity }}
          affinity:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.tolerations }}
          tolerations:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- if .Values.priorityClassName }}
          priorityClassName: {{ .Values.priorityClassName }}
          {{- end }}
          serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
          restartPolicy: "Never"
          {{- with .Values.imagePullSecrets }}
          imagePullSecrets:
            {{- toYaml . | nindent 10 }}
          {{- end }}
          containers:
            - name: {{ .Chart.Name }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
@@ -44,6 +76,18 @@

                - {{ $value | quote }}
                {{- end }}
              {{- end }}
              livenessProbe:
                {{- toYaml .Values.livenessProbe | nindent 16 }}
              resources:
                {{- toYaml .Values.resources | nindent 16 }}
              securityContext:
                allowPrivilegeEscalation: false
                capabilities:
                  drop:
                    - ALL
                privileged: false
                readOnlyRootFilesystem: true
                runAsNonRoot: true
              volumeMounts:
                - mountPath: /policy-dir
                  name: policy-volume

@@ -51,3 +95,4 @@

            - name: policy-volume
              configMap:
                name: {{ template "descheduler.fullname" . }}
{{- end }}
94 charts/descheduler/templates/deployment.yaml (Normal file)
@@ -0,0 +1,94 @@
|
{{- if eq .Values.kind "Deployment" }}
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: {{ template "descheduler.fullname" . }}
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
labels:
|
||||||
|
{{- include "descheduler.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
{{- if gt .Values.replicas 1.0}}
|
||||||
|
{{- if not .Values.leaderElection.enabled }}
|
||||||
|
{{- fail "You must set leaderElection to use more than 1 replica"}}
|
||||||
|
{{- end}}
|
||||||
|
replicas: {{ required "leaderElection required for running more than one replica" .Values.replicas }}
|
||||||
|
{{- else }}
|
||||||
|
replicas: 1
|
||||||
|
{{- end }}
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
{{- include "descheduler.selectorLabels" . | nindent 6 }}
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
{{- include "descheduler.selectorLabels" . | nindent 8 }}
|
||||||
|
{{- if .Values.podLabels }}
|
||||||
|
{{- .Values.podLabels | toYaml | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
annotations:
|
||||||
|
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
|
||||||
|
{{- if .Values.podAnnotations }}
|
||||||
|
{{- .Values.podAnnotations | toYaml | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
spec:
|
||||||
|
{{- if .Values.priorityClassName }}
|
||||||
|
priorityClassName: {{ .Values.priorityClassName }}
|
||||||
|
{{- end }}
|
||||||
|
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
|
||||||
|
{{- with .Values.imagePullSecrets }}
|
||||||
|
imagePullSecrets:
|
||||||
|
{{- toYaml . | nindent 10 }}
|
||||||
|
{{- end }}
|
||||||
|
containers:
|
||||||
|
- name: {{ .Chart.Name }}
|
||||||
|
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
|
||||||
|
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||||
|
command:
|
||||||
|
- "/bin/descheduler"
|
||||||
|
args:
|
||||||
|
- "--policy-config-file"
|
||||||
|
- "/policy-dir/policy.yaml"
|
||||||
|
- "--descheduling-interval"
|
||||||
|
- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
|
||||||
|
{{- range $key, $value := .Values.cmdOptions }}
|
||||||
|
- {{ printf "--%s" $key | quote }}
|
||||||
|
{{- if $value }}
|
||||||
|
- {{ $value | quote }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- include "descheduler.leaderElection" . | nindent 12 }}
|
||||||
|
ports:
|
||||||
|
- containerPort: 10258
|
||||||
|
protocol: TCP
|
||||||
|
livenessProbe:
|
||||||
|
{{- toYaml .Values.livenessProbe | nindent 12 }}
|
||||||
|
resources:
|
||||||
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
|
securityContext:
|
||||||
|
allowPrivilegeEscalation: false
|
||||||
|
capabilities:
|
||||||
|
drop:
|
||||||
|
- ALL
|
||||||
|
privileged: false
|
||||||
|
readOnlyRootFilesystem: true
|
||||||
|
runAsNonRoot: true
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /policy-dir
|
||||||
|
name: policy-volume
|
||||||
|
volumes:
|
||||||
|
- name: policy-volume
|
||||||
|
configMap:
|
||||||
|
name: {{ template "descheduler.fullname" . }}
|
||||||
|
{{- with .Values.nodeSelector }}
|
||||||
|
nodeSelector:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- with .Values.affinity }}
|
||||||
|
affinity:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- with .Values.tolerations }}
|
||||||
|
tolerations:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
21 charts/descheduler/templates/service.yaml (Normal file)
@@ -0,0 +1,21 @@
|
{{- if eq .Values.kind "Deployment" }}
|
||||||
|
{{- if eq .Values.service.enabled true }}
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
{{- include "descheduler.labels" . | nindent 4 }}
|
||||||
|
name: {{ template "descheduler.fullname" . }}
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
spec:
|
||||||
|
clusterIP: None
|
||||||
|
ports:
|
||||||
|
- name: http-metrics
|
||||||
|
port: 10258
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 10258
|
||||||
|
selector:
|
||||||
|
{{- include "descheduler.selectorLabels" . | nindent 4 }}
|
||||||
|
type: ClusterIP
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
@@ -3,6 +3,10 @@ apiVersion: v1

kind: ServiceAccount
metadata:
  name: {{ template "descheduler.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
  {{- if .Values.serviceAccount.annotations }}
  annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
  {{- end }}
{{- end -}}
41 charts/descheduler/templates/servicemonitor.yaml (Normal file)
@@ -0,0 +1,41 @@
|
{{- if eq .Values.kind "Deployment" }}
|
||||||
|
{{- if eq .Values.serviceMonitor.enabled true }}
|
||||||
|
apiVersion: monitoring.coreos.com/v1
|
||||||
|
kind: ServiceMonitor
|
||||||
|
metadata:
|
||||||
|
name: {{ template "descheduler.fullname" . }}-servicemonitor
|
||||||
|
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
|
||||||
|
labels:
|
||||||
|
{{- include "descheduler.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
jobLabel: jobLabel
|
||||||
|
namespaceSelector:
|
||||||
|
matchNames:
|
||||||
|
- {{ .Release.Namespace }}
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
{{- include "descheduler.selectorLabels" . | nindent 6 }}
|
||||||
|
endpoints:
|
||||||
|
- honorLabels: {{ .Values.serviceMonitor.honorLabels | default true }}
|
||||||
|
port: http-metrics
|
||||||
|
{{- if .Values.serviceMonitor.interval }}
|
||||||
|
interval: {{ .Values.serviceMonitor.interval }}
|
||||||
|
{{- end }}
|
||||||
|
scheme: https
|
||||||
|
tlsConfig:
|
||||||
|
{{- if eq .Values.serviceMonitor.insecureSkipVerify true }}
|
||||||
|
insecureSkipVerify: true
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.serviceMonitor.serverName }}
|
||||||
|
serverName: {{ .Values.serviceMonitor.serverName }}
|
||||||
|
{{- end}}
|
||||||
|
{{- if .Values.serviceMonitor.metricRelabelings }}
|
||||||
|
metricRelabelings:
|
||||||
|
{{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | indent 4) . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.serviceMonitor.relabelings }}
|
||||||
|
relabelings:
|
||||||
|
{{ tpl (toYaml .Values.serviceMonitor.relabelings | indent 4) . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
@@ -2,36 +2,92 @@

# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# CronJob or Deployment
kind: CronJob

image:
  repository: registry.k8s.io/descheduler/descheduler
  # Overrides the image tag whose default is the chart version
  tag: ""
  pullPolicy: IfNotPresent

imagePullSecrets:
#   - name: container-registry-secret

resources:
  requests:
    cpu: 500m
    memory: 256Mi
  # limits:
  #   cpu: 100m
  #   memory: 128Mi

nameOverride: ""
fullnameOverride: ""

# labels that'll be applied to all resources
commonLabels: {}

cronJobApiVersion: "batch/v1"
schedule: "*/2 * * * *"
suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished: 600

# Required when running as a Deployment
deschedulingInterval: 5m

# Specifies the replica count for Deployment
# Set leaderElection if you want to use more than 1 replica
# Set affinity.podAntiAffinity rule if you want to schedule onto a node
# only if that node is in the same zone as at least one already-running descheduler
replicas: 1

# Specifies whether Leader Election resources should be created
# Required when running as a Deployment
# NOTE: Leader election can't be activated if DryRun enabled
leaderElection: {}
#  enabled: true
#  leaseDuration: 15s
#  renewDeadline: 10s
#  retryPeriod: 2s
#  resourceLock: "leases"
#  resourceName: "descheduler"
#  resourceNamescape: "kube-system"

cmdOptions:
  v: 3

deschedulerPolicy:
  # nodeSelector: "key1=value1,key2=value2"
  # maxNoOfPodsToEvictPerNode: 10
  # maxNoOfPodsToEvictPerNamespace: 10
  # ignorePvcPods: true
  # evictLocalStoragePods: true
  strategies:
    RemoveDuplicates:
      enabled: true
    RemovePodsHavingTooManyRestarts:
      enabled: true
      params:
        podsHavingTooManyRestarts:
          podRestartThreshold: 100
          includingInitContainers: true
    RemovePodsViolatingNodeTaints:
      enabled: true
    RemovePodsViolatingNodeAffinity:
      enabled: true
      params:
        nodeAffinityType:
          - requiredDuringSchedulingIgnoredDuringExecution
    RemovePodsViolatingInterPodAntiAffinity:
      enabled: true
    RemovePodsViolatingTopologySpreadConstraint:
      enabled: true
      params:
        includeSoftConstraints: false
    LowNodeUtilization:
      enabled: true
      params:
@@ -47,6 +103,34 @@

priorityClassName: system-cluster-critical

nodeSelector: {}
#  foo: bar

affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: kubernetes.io/e2e-az-name
#         operator: In
#         values:
#         - e2e-az1
#         - e2e-az2
# podAntiAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     - labelSelector:
#         matchExpressions:
#           - key: app.kubernetes.io/name
#             operator: In
#             values:
#               - descheduler
#       topologyKey: "kubernetes.io/hostname"
tolerations: []
# - key: 'management'
#   operator: 'Equal'
#   value: 'tool'
#   effect: 'NoSchedule'

rbac:
  # Specifies whether RBAC resources should be created
  create: true
@@ -57,3 +141,41 @@ serviceAccount:
|
|||||||
# The name of the ServiceAccount to use.
|
# The name of the ServiceAccount to use.
|
||||||
# If not set and create is true, a name is generated using the fullname template
|
# If not set and create is true, a name is generated using the fullname template
|
||||||
name:
|
name:
|
||||||
|
# Specifies custom annotations for the serviceAccount
|
||||||
|
annotations: {}
|
||||||
|
|
||||||
|
podAnnotations: {}
|
||||||
|
|
||||||
|
podLabels: {}
|
||||||
|
|
||||||
|
livenessProbe:
|
||||||
|
failureThreshold: 3
|
||||||
|
httpGet:
|
||||||
|
path: /healthz
|
||||||
|
port: 10258
|
||||||
|
scheme: HTTPS
|
||||||
|
initialDelaySeconds: 3
|
||||||
|
periodSeconds: 10
|
||||||
|
|
||||||
|
service:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
serviceMonitor:
|
||||||
|
enabled: false
|
||||||
|
# The namespace where Prometheus expects to find service monitors.
|
||||||
|
# namespace: ""
|
||||||
|
interval: ""
|
||||||
|
# honorLabels: true
|
||||||
|
insecureSkipVerify: true
|
||||||
|
serverName: null
|
||||||
|
metricRelabelings: []
|
||||||
|
# - action: keep
|
||||||
|
# regex: 'descheduler_(build_info|pods_evicted)'
|
||||||
|
# sourceLabels: [__name__]
|
||||||
|
relabelings: []
|
||||||
|
# - sourceLabels: [__meta_kubernetes_pod_node_name]
|
||||||
|
# separator: ;
|
||||||
|
# regex: ^(.*)$
|
||||||
|
# targetLabel: nodename
|
||||||
|
# replacement: $1
|
||||||
|
# action: replace
|
||||||
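For reference, a sketch of an override that lets a Prometheus Operator scrape the metrics endpoint exposed by the values above could look like this. The 30s interval is an assumption; the remaining keys come straight from the block above.

```yaml
service:
  enabled: true

serviceMonitor:
  enabled: true
  interval: "30s"           # assumed; an empty string falls back to the Prometheus default
  insecureSkipVerify: true  # the descheduler serves metrics over self-signed HTTPS by default
  metricRelabelings:
  - action: keep
    regex: 'descheduler_(build_info|pods_evicted)'
    sourceLabels: [__name__]
```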
@@ -1,20 +1,20 @@
 # See https://cloud.google.com/cloud-build/docs/build-config

 # this must be specified in seconds. If omitted, defaults to 600s (10 mins)
-timeout: 1200s
+timeout: 3600s
 # this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
 # or any new substitutions added in the future.
 options:
   substitution_option: ALLOW_LOOSE
 steps:
-  - name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4'
+  - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211118-2f2d816b90'
    entrypoint: make
    env:
    - DOCKER_CLI_EXPERIMENTAL=enabled
    - VERSION=$_GIT_TAG
    - BASE_REF=$_PULL_BASE_REF
    args:
-   - push
+   - push-all
 substitutions:
  # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
  # can be used as a substitution
@@ -18,44 +18,79 @@ limitations under the License.
 package options

 import (
-	clientset "k8s.io/client-go/kubernetes"
-
-	// install the componentconfig api so we get its defaulting and conversion functions
+	"time"
+
+	"github.com/spf13/pflag"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	apiserveroptions "k8s.io/apiserver/pkg/server/options"
+	clientset "k8s.io/client-go/kubernetes"
+	componentbaseconfig "k8s.io/component-base/config"
+	componentbaseoptions "k8s.io/component-base/config/options"
 	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
 	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
 	deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
+)

-	"github.com/spf13/pflag"
+const (
+	DefaultDeschedulerPort = 10258
 )

 // DeschedulerServer configuration
 type DeschedulerServer struct {
 	componentconfig.DeschedulerConfiguration
-	Client clientset.Interface
+
+	Client         clientset.Interface
+	EventClient    clientset.Interface
+	SecureServing  *apiserveroptions.SecureServingOptionsWithLoopback
+	DisableMetrics bool
 }

 // NewDeschedulerServer creates a new DeschedulerServer with default parameters
-func NewDeschedulerServer() *DeschedulerServer {
-	versioned := v1alpha1.DeschedulerConfiguration{}
-	deschedulerscheme.Scheme.Default(&versioned)
-	cfg := componentconfig.DeschedulerConfiguration{}
-	deschedulerscheme.Scheme.Convert(versioned, &cfg, nil)
-	s := DeschedulerServer{
-		DeschedulerConfiguration: cfg,
+func NewDeschedulerServer() (*DeschedulerServer, error) {
+	cfg, err := newDefaultComponentConfig()
+	if err != nil {
+		return nil, err
 	}
-	return &s
+
+	secureServing := apiserveroptions.NewSecureServingOptions().WithLoopback()
+	secureServing.BindPort = DefaultDeschedulerPort
+
+	return &DeschedulerServer{
+		DeschedulerConfiguration: *cfg,
+		SecureServing:            secureServing,
+	}, nil
+}
+
+func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
+	versionedCfg := v1alpha1.DeschedulerConfiguration{
+		LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
+			LeaderElect:       false,
+			LeaseDuration:     metav1.Duration{Duration: 137 * time.Second},
+			RenewDeadline:     metav1.Duration{Duration: 107 * time.Second},
+			RetryPeriod:       metav1.Duration{Duration: 26 * time.Second},
+			ResourceLock:      "leases",
+			ResourceName:      "descheduler",
+			ResourceNamespace: "kube-system",
+		},
+	}
+	deschedulerscheme.Scheme.Default(&versionedCfg)
+	cfg := componentconfig.DeschedulerConfiguration{}
+	if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
+		return nil, err
+	}
+	return &cfg, nil
 }

 // AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
 func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
+	fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
 	fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
 	fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
 	fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
 	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
-	// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
-	fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
-	// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
-	fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
-	// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
-	fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "Enables evicting pods using local storage by descheduler")
+	fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
+
+	componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
+
+	rs.SecureServing.AddFlags(fs)
 }
@@ -18,44 +18,103 @@ limitations under the License.
 package app

 import (
-	"flag"
+	"context"
 	"io"
+	"os"
+	"os/signal"
+	"syscall"
+
+	"k8s.io/apiserver/pkg/server/healthz"

 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/descheduler"

 	"github.com/spf13/cobra"

-	aflag "k8s.io/component-base/cli/flag"
-	"k8s.io/component-base/logs"
+	apiserver "k8s.io/apiserver/pkg/server"
+	"k8s.io/apiserver/pkg/server/mux"
+	restclient "k8s.io/client-go/rest"
+	registry "k8s.io/component-base/logs/api/v1"
+	jsonLog "k8s.io/component-base/logs/json"
+	_ "k8s.io/component-base/logs/json/register"
+	"k8s.io/component-base/metrics/legacyregistry"
 	"k8s.io/klog/v2"
 )

 // NewDeschedulerCommand creates a *cobra.Command object with default parameters
 func NewDeschedulerCommand(out io.Writer) *cobra.Command {
-	s := options.NewDeschedulerServer()
+	s, err := options.NewDeschedulerServer()
+	if err != nil {
+		klog.ErrorS(err, "unable to initialize server")
+	}
+
 	cmd := &cobra.Command{
 		Use:   "descheduler",
 		Short: "descheduler",
 		Long:  `The descheduler evicts pods which may be bound to less desired nodes`,
 		Run: func(cmd *cobra.Command, args []string) {
-			logs.InitLogs()
-			defer logs.FlushLogs()
-			err := Run(s)
-			if err != nil {
-				klog.Errorf("%v", err)
+			// s.Logs.Config.Format = s.Logging.Format
+
+			// LoopbackClientConfig is a config for a privileged loopback connection
+			var LoopbackClientConfig *restclient.Config
+			var SecureServing *apiserver.SecureServingInfo
+			if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
+				klog.ErrorS(err, "failed to apply secure server configuration")
+				return
 			}
+
+			var factory registry.LogFormatFactory
+			if s.Logging.Format == "json" {
+				factory = jsonLog.Factory{}
+			}
+
+			if factory == nil {
+				klog.ClearLogger()
+			} else {
+				log, logrFlush := factory.Create(registry.LoggingConfiguration{
+					Format:    s.Logging.Format,
+					Verbosity: s.Logging.Verbosity,
+				})
+				defer logrFlush()
+				klog.SetLogger(log)
+			}
+
+			ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+
+			pathRecorderMux := mux.NewPathRecorderMux("descheduler")
+			if !s.DisableMetrics {
+				pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
+			}
+
+			healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
+
+			stoppedCh, _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
+			if err != nil {
+				klog.Fatalf("failed to start secure server: %v", err)
+				return
+			}
+
+			err = Run(ctx, s)
+			if err != nil {
+				klog.ErrorS(err, "descheduler server")
+			}
+
+			done()
+			// wait for metrics server to close
+			<-stoppedCh
 		},
 	}
-	cmd.SetOutput(out)
+	cmd.SetOut(out)
+
 	flags := cmd.Flags()
-	flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
-	flags.AddGoFlagSet(flag.CommandLine)
 	s.AddFlags(flags)
 	return cmd
 }

-func Run(rs *options.DeschedulerServer) error {
-	return descheduler.Run(rs)
+func Run(ctx context.Context, rs *options.DeschedulerServer) error {
+	return descheduler.Run(ctx, rs)
+}
+
+func SetupLogs() {
+	klog.SetOutput(os.Stdout)
+	klog.InitFlags(nil)
 }
@@ -18,70 +18,19 @@ package app

 import (
 	"fmt"
-	"runtime"
-	"strings"

 	"github.com/spf13/cobra"
+	"sigs.k8s.io/descheduler/pkg/version"
 )

-var (
-	// gitCommit is a constant representing the source version that
-	// generated this build. It should be set during build via -ldflags.
-	gitCommit string
-	// version is a constant representing the version tag that
-	// generated this build. It should be set during build via -ldflags.
-	version string
-	// buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
-	// It should be set during build via -ldflags.
-	buildDate string
-)
-
-// Info holds the information related to descheduler app version.
-type Info struct {
-	Major      string `json:"major"`
-	Minor      string `json:"minor"`
-	GitCommit  string `json:"gitCommit"`
-	GitVersion string `json:"gitVersion"`
-	BuildDate  string `json:"buildDate"`
-	GoVersion  string `json:"goVersion"`
-	Compiler   string `json:"compiler"`
-	Platform   string `json:"platform"`
-}
-
-// Get returns the overall codebase version. It's for detecting
-// what code a binary was built from.
-func Get() Info {
-	majorVersion, minorVersion := splitVersion(version)
-	return Info{
-		Major:      majorVersion,
-		Minor:      minorVersion,
-		GitCommit:  gitCommit,
-		GitVersion: version,
-		BuildDate:  buildDate,
-		GoVersion:  runtime.Version(),
-		Compiler:   runtime.Compiler,
-		Platform:   fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
-	}
-}
-
 func NewVersionCommand() *cobra.Command {
-	var versionCmd = &cobra.Command{
+	versionCmd := &cobra.Command{
 		Use:   "version",
 		Short: "Version of descheduler",
 		Long:  `Prints the version of descheduler.`,
 		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Descheduler version %+v\n", Get())
+			fmt.Printf("Descheduler version %+v\n", version.Get())
 		},
 	}
 	return versionCmd
 }
-
-// splitVersion splits the git version to generate major and minor versions needed.
-func splitVersion(version string) (string, string) {
-	if version == "" {
-		return "", ""
-	}
-	// A sample version would be of form v0.1.0-7-ge884046, so split at first '.' and
-	// then return 0 and 1+(+ appended to follow semver convention) for major and minor versions.
-	return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
-}
@@ -17,20 +17,23 @@ limitations under the License.
 package main

 import (
-	"flag"
-	"fmt"
 	"os"

+	"k8s.io/component-base/cli"
 	"sigs.k8s.io/descheduler/cmd/descheduler/app"
+	"sigs.k8s.io/descheduler/pkg/descheduler"
 )

+func init() {
+	app.SetupLogs()
+	descheduler.SetupPlugins()
+}
+
 func main() {
 	out := os.Stdout
 	cmd := app.NewDeschedulerCommand(out)
 	cmd.AddCommand(app.NewVersionCommand())
-	flag.CommandLine.Parse([]string{})
-	if err := cmd.Execute(); err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
+	code := cli.Run(cmd)
+	os.Exit(code)
 }
@@ -3,10 +3,10 @@
 ## Required Tools

 - [Git](https://git-scm.com/downloads)
-- [Go 1.14+](https://golang.org/dl/)
+- [Go 1.16+](https://golang.org/dl/)
 - [Docker](https://docs.docker.com/install/)
 - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
-- [kind](https://kind.sigs.k8s.io/)
+- [kind v0.10.0+](https://kind.sigs.k8s.io/)

 ## Build and Run

@@ -31,12 +31,47 @@ View all CLI options.
 ## Run Tests
 ```
 GOOS=linux make dev-image
-kind create cluster --config hack/kind_config.yaml
+make kind-multi-node
 kind load docker-image <image name>
 kind get kubeconfig > /tmp/admin.conf
+export KUBECONFIG=/tmp/admin.conf
 make test-unit
 make test-e2e
 ```
+
+## Format Code
+
+After making changes in the code base, ensure that the code is formatted correctly:
+
+```
+make fmt
+```
+
+## Build Helm Package locally
+
+If you have made changes to the chart and just want to check that it templates correctly and is buildable, run this command to build a package from the `./charts` directory:
+
+```
+make build-helm
+```
+
+## Lint Helm Chart locally
+
+To lint your helm chart changes locally, run:
+
+```
+make lint-chart
+```
+
+## Test helm changes locally with kind and ct
+
+You will need kind and docker (or equivalent) installed. We can use the public ct image to avoid installing ct and all of its dependencies.
+
+```
+make kind-multi-node
+make ct-helm
+```
+
 ### Miscellaneous
 See the [hack directory](https://github.com/kubernetes-sigs/descheduler/tree/master/hack) for additional tools and scripts used for developing the descheduler.
docs/proposals.md (new file, 16 lines)
@@ -0,0 +1,16 @@
+# Proposals
+This document walks you through all of the enhancement proposals for the descheduler.
+
+## Descheduler v1alpha2 Design Proposal
+```yaml
+title: Descheduler v1alpha2 Design Proposal
+authors:
+  - "@damemi"
+link:
+  - https://docs.google.com/document/d/1S1JCh-0F-QCJvBBG-kbmXiHAJFF8doArhDIAKbOj93I/edit#heading=h.imbp1ctnc8lx
+  - https://github.com/kubernetes-sigs/descheduler/issues/679
+owning-sig: sig-scheduling
+creation-date: 2021-05-01
+status: implementable
+```
@@ -1,32 +1,83 @@
 # Release Guide

-## Container Image
-
-### Semi-automatic
-
-1. Make sure your repo is clean by git's standards
-2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
-3. Push the release branch to the descheuler repo and ensure branch protection is enabled (not required for patch releases)
-4. Tag the repository and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
-5. Publish a draft release using the tag you just created
-6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
-7. Publish release
-8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
-
-### Manual
-
-1. Make sure your repo is clean by git's standards
-2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
-3. Push the release branch to the descheuler repo and ensure branch protection is enabled (not required for patch releases)
-4. Tag the repository and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
-5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
-6. Build and push the container image to the staging registry `VERSION=$VERSION make push`
-7. Publish a draft release using the tag you just created
-8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
-9. Publish release
-10. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
+The process for publishing each Descheduler release is a mixture of manual and automatic steps. Over
+time, it would be good to automate as much of this process as possible, but for now
+each manual step must be performed precisely so that the automated steps execute properly.
+
+## Pre-release Code Changes
+
+Before publishing each release, the following code updates must be made:
+
+- [ ] (Optional, but recommended) Bump `k8s.io` dependencies to the `-rc` tags. These tags are usually published around upstream code freeze. [Example](https://github.com/kubernetes-sigs/descheduler/pull/539)
+- [ ] Bump `k8s.io` dependencies to GA tags once they are published (following the upstream release). [Example](https://github.com/kubernetes-sigs/descheduler/pull/615)
+- [ ] Ensure that Go is updated to the same version as upstream. [Example](https://github.com/kubernetes-sigs/descheduler/pull/801)
+- [ ] Make CI changes in [github.com/kubernetes/test-infra](https://github.com/kubernetes/test-infra) to add the new version's tests (note, this may also include a Go bump). [Example](https://github.com/kubernetes/test-infra/pull/25833)
+- [ ] Update local CI versions for utils (such as golang-ci), kind, and go. [Example - e2e](https://github.com/kubernetes-sigs/descheduler/commit/ac4d576df8831c0c399ee8fff1e85469e90b8c44), [Example - helm](https://github.com/kubernetes-sigs/descheduler/pull/821)
+- [ ] Update version references in docs and Readme. [Example](https://github.com/kubernetes-sigs/descheduler/pull/617)
+
+## Release Process
+
+When the above pre-release steps are complete and the release is ready to be cut, perform the following steps **in order**
+(the flowchart below demonstrates these steps):
+
+**Version release**
+1. Create the `git tag` on `master` for the release, eg `v0.24.0`
+2. Merge Helm chart version update to `master` (see [Helm chart](#helm-chart) below). [Example](https://github.com/kubernetes-sigs/descheduler/pull/709)
+3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
+4. Cut release branch from `master`, eg `release-1.24`
+5. Publish release using Github's release process from the git tag you created
+6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
+
+**Patch release**
+1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
+2. Create the patch tag on the release branch, eg `v0.24.1` on `release-1.24`
+3. Merge Helm chart version update to release branch
+4. Perform the image promotion process for the patch version
+5. Publish release using Github's release process from the git tag you created
+6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
+
+### Flowchart
+
+![Release Process](release-process.png)
+
+### Image promotion process
+
+Every merge to any branch triggers an [image build and push](https://github.com/kubernetes/test-infra/blob/c36b8e5/config/jobs/image-pushing/k8s-staging-descheduler.yaml) to a `gcr.io` repository.
+These automated image builds are snapshots of the code in place at the time of every PR merge and
+tagged with the latest git SHA at the time of the build. To create a final release image, the desired
+auto-built image SHA is added to a [file upstream](https://github.com/kubernetes/k8s.io/blob/e9e971c/k8s.gcr.io/images/k8s-staging-descheduler/images.yaml) which
+copies that image to a public registry.
+
+Automatic builds can be monitored and re-triggered with the [`post-descheduler-push-images` job](https://prow.k8s.io/?job=post-descheduler-push-images) on prow.k8s.io.
+
+Note that images can also be manually built and pushed using `VERSION=$VERSION make push-all` by [users with access](https://github.com/kubernetes/k8s.io/blob/fbee8f67b70304241e613a672c625ad972998ad7/groups/sig-scheduling/groups.yaml#L33-L43).
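To make the promotion step concrete: the entry added to that upstream `images.yaml` maps a staging image digest to the release tags it should carry. The shape below is a sketch of that format; the digest and tag are placeholders, not real values.

```yaml
# illustrative images.yaml entry (digest and tag are placeholders)
- name: descheduler
  dmap:
    "sha256:0000000000000000000000000000000000000000000000000000000000000000": ["v0.24.0"]
```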
+## Helm Chart
+
+We currently use the [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action) to automatically
+publish [Helm chart releases](https://github.com/kubernetes-sigs/descheduler/blob/022e07c/.github/workflows/release.yaml).
+This action is triggered when it detects any changes to [`Chart.yaml`](https://github.com/kubernetes-sigs/descheduler/blob/022e07c27853fade6d1304adc0a6ebe02642386c/charts/descheduler/Chart.yaml) on
+a `release-*` branch.
+
+Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. An example git tag name is `descheduler-helm-chart-0.18.0`.
+Released versions of the helm charts are stored in the `gh-pages` branch of this repo.
+
+The major and minor version of the chart matches the descheduler major and minor versions. For example, descheduler helm chart version descheduler-helm-chart-0.18.0 corresponds
+to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match; the patch
+version of the descheduler helm chart is used to version changes specific to the helm chart.
+
+1. Merge all helm chart changes into the master branch before the release is tagged/cut
+   1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix)
+   2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
+2. Make sure your repo is clean by git's standards
+3. Follow the release-branch or patch release tagging pattern from the above section.
+4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
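Concretely, the two `Chart.yaml` fields would be updated along these lines for a hypothetical v0.24.x release (the values are placeholders):

```yaml
# charts/descheduler/Chart.yaml (illustrative excerpt)
version: 0.24.1      # chart version; its patch number tracks chart-only changes
appVersion: "0.24.0" # matches descheduler v0.24.0, without the "v" prefix
```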
+## Notes
+The Helm releaser-action compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
+will be nothing to compare and it will fail. This is why it's necessary to tag, eg, `v0.24.0` *before* making the changes to the
+Helm chart version, so that there is a new diff for the action to find. (Tagging *after* making the Helm chart changes would
+also work, but then the code that gets built into the promoted image will be tagged as `descheduler-helm-chart-xxx` rather than `v0.xx.0`.)
+
-### Notes
-
 See the [post-descheduler-push-images dashboard](https://testgrid.k8s.io/sig-scheduling#post-descheduler-push-images) for staging registry image build job status.

 View the descheduler staging registry using [this URL](https://console.cloud.google.com/gcr/images/k8s-staging-descheduler/GLOBAL/descheduler) in a web browser

@@ -51,19 +102,3 @@ Pull image from the staging registry.
 ```
 docker pull gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
 ```
-
-## Helm Chart
-Helm chart releases are managed by a separate set of git tags that are prefixed with `chart-*`. Example git tag name is `chart-0.18.0`. Released versions of the
-helm charts are stored in the `gh-pages` branch of this repo. The [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action) is setup to
-build and push the helm charts to the `gh-pages` branch when a `chart-*` git tag is created.
-
-The major and minor version of the chart matches the descheduler major and minor versions. For example descheduler helm chart version chart-0.18.0 corresponds
-to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patcher version of the descheduler will not necessarily match. The patch
-version of the descheduler helm chart is used to version changes specific to the helm chart.
-
-1. Merge all helm chart changes into the appropriate release branch(i.e. release-1.18)
-1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version(no `v` prefix)
-2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
-2. Make sure your repo is clean by git's standards
-3. Create the tag and push it `git checkout release-1.18; CHART_VERSION=chart-0.18.0; git tag $CHART_VERSION; git push origin $CHART_VERSION`
-4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
docs/release-process.png (new binary file, 121 KiB; binary file not shown)
@@ -1,9 +1,31 @@
 # User Guide

-Starting with descheduler release v0.10.0 container images are available in these container registries.
-* `asia.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
-* `eu.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
-* `us.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
+Starting with descheduler release v0.10.0 container images are available in the official k8s container registry.
+
+Descheduler Version | Container Image                                 | Architectures           |
+------------------- |-------------------------------------------------|-------------------------|
+v0.26.1             | registry.k8s.io/descheduler/descheduler:v0.26.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.26.0             | registry.k8s.io/descheduler/descheduler:v0.26.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.25.1             | registry.k8s.io/descheduler/descheduler:v0.25.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.25.0             | registry.k8s.io/descheduler/descheduler:v0.25.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.24.1             | registry.k8s.io/descheduler/descheduler:v0.24.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.24.0             | registry.k8s.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.23.1             | registry.k8s.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.22.0             | registry.k8s.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.21.0             | registry.k8s.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.20.0             | registry.k8s.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64          |
+v0.19.0             | registry.k8s.io/descheduler/descheduler:v0.19.0 | AMD64                   |
+v0.18.0             | registry.k8s.io/descheduler/descheduler:v0.18.0 | AMD64                   |
+v0.10.0             | registry.k8s.io/descheduler/descheduler:v0.10.0 | AMD64                   |
+
+Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
+starting with descheduler release v0.20.0 use the below process to download the official descheduler
+image into a kind cluster.
+```
+kind create cluster
+docker pull registry.k8s.io/descheduler/descheduler:v0.20.0
+kind load docker-image registry.k8s.io/descheduler/descheduler:v0.20.0
+```

 ## Policy Configuration Examples
 The [examples](https://github.com/kubernetes-sigs/descheduler/tree/master/examples) directory has descheduler policy configuration examples.
@@ -19,31 +41,52 @@ Usage:
   descheduler [command]

 Available Commands:
+  completion  generate the autocompletion script for the specified shell
   help        Help about any command
   version     Version of descheduler

 Flags:
-      --add-dir-header                   If true, adds the file directory to the header
-      --alsologtostderr                  log to standard error as well as files
-      --descheduling-interval duration   Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
-      --dry-run                          execute descheduler in dry run mode.
-      --evict-local-storage-pods         Enables evicting pods using local storage by descheduler
-  -h, --help                             help for descheduler
-      --kubeconfig string                File with kube configuration.
-      --log-backtrace-at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
-      --log-dir string                   If non-empty, write log files in this directory
-      --log-file string                  If non-empty, use this log file
-      --log-file-max-size uint           Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
-      --log-flush-frequency duration     Maximum number of seconds between log flushes (default 5s)
-      --logtostderr                      log to standard error instead of files (default true)
-      --max-pods-to-evict-per-node int   Limits the maximum number of pods to be evicted per node by descheduler
-      --node-selector string             Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
-      --policy-config-file string        File with descheduler policy configuration.
-      --skip-headers                     If true, avoid header prefixes in the log messages
-      --skip-log-headers                 If true, avoid headers when opening log files
-      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
-  -v, --v                                Level number for the log level verbosity
-      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+      --add-dir-header                           If true, adds the file directory to the header of the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --alsologtostderr                          log to standard error as well as files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --bind-address ip                          The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0)
+      --cert-dir string                          The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
+      --descheduling-interval duration           Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
+      --disable-metrics                          Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
+      --dry-run                                  execute descheduler in dry run mode.
+  -h, --help                                     help for descheduler
+      --http2-max-streams-per-connection int     The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
+      --kubeconfig string                        File with kube configuration.
+      --leader-elect                             Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
+      --leader-elect-lease-duration duration     The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 15s)
+      --leader-elect-renew-deadline duration     The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled. (default 10s)
+      --leader-elect-resource-lock string        The type of resource object that is used for locking during leader election. Supported options are 'endpoints', 'configmaps', 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
+      --leader-elect-resource-name string        The name of resource object that is used for locking during leader election. (default "descheduler")
+      --leader-elect-resource-namespace string   The namespace of resource object that is used for locking during leader election. (default "kube-system")
+      --leader-elect-retry-period duration       The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 2s)
+      --log-backtrace-at traceLocation           when logging hits line file:N, emit a stack trace (default :0) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --log_dir string                           If non-empty, write log files in this directory (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --log_file string                          If non-empty, use this log file (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --log_file_max_size uint                   Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --log-flush-frequency duration             Maximum number of seconds between log flushes (default 5s)
+      --logging-format string                    Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
+      --logtostderr                              log to standard error instead of files (default true) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --one-output                               If true, only write logs to their native severity level (vs also writing to each lower severity level) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --permit-address-sharing                   If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
+      --permit-port-sharing                      If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
+      --policy-config-file string                File with descheduler policy configuration.
+      --secure-port int                          The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
+      --skip-headers                             If true, avoid header prefixes in the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --skip-log-headers                         If true, avoid headers when opening log files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --stderrthreshold severity                 logs at or above this threshold go to stderr (default 2) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --tls-cert-file string                     File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
+      --tls-cipher-suites strings                Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
+                                                 Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
+                                                 Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
+      --tls-min-version string                   Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
+      --tls-private-key-file string              File containing the default x509 private key matching --tls-cert-file.
+      --tls-sni-cert-key namedCertKey            A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
+  -v, --v                                        Level number for the log level verbosity
+      --vmodule moduleSpec                       comma-separated list of pattern=N settings for file-filtered logging

 Use "descheduler [command] --help" for more information about a command.
 ```
@@ -71,20 +114,67 @@ This policy configuration file ensures that pods created more than 7 days ago are evicted.
 apiVersion: "descheduler/v1alpha1"
 kind: "DeschedulerPolicy"
 strategies:
-  "LowNodeUtilization":
-     enabled: false
-  "RemoveDuplicates":
-     enabled: false
-  "RemovePodsViolatingInterPodAntiAffinity":
-     enabled: false
-  "RemovePodsViolatingNodeAffinity":
-     enabled: false
-  "RemovePodsViolatingNodeTaints":
-     enabled: false
-  "RemovePodsHavingTooManyRestarts":
-     enabled: false
   "PodLifeTime":
     enabled: true
    params:
-      maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
+      podLifeTime:
+        maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
 ```

+### Balance Cluster By Node Memory Utilization
+
+If your cluster has been running for a long period of time, you may find that resource utilization is not very
+balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory`,
+or `number of pods`.
+
+#### Balance high utilization nodes
+Using `LowNodeUtilization`, the descheduler will rebalance the cluster based on memory by evicting pods
+from nodes with memory utilization over 70% to nodes with memory utilization below 20%.
+
+```
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "LowNodeUtilization":
+    enabled: true
+    params:
+      nodeResourceUtilizationThresholds:
+        thresholds:
+          "memory": 20
+        targetThresholds:
+          "memory": 70
+```
+
+#### Balance low utilization nodes
+Using `HighNodeUtilization`, the descheduler will rebalance the cluster based on memory by evicting pods
+from nodes with memory utilization lower than 20%, compacting them onto a minimal set of nodes. This strategy should be used
+together with the scheduler's `NodeResourcesFit` plugin and its `MostAllocated` scoring strategy (see the [scheduler configuration docs](https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins)).
+
+```
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "HighNodeUtilization":
+    enabled: true
+    params:
+      nodeResourceUtilizationThresholds:
+        thresholds:
+          "memory": 20
+```
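For completeness, a sketch of the matching scheduler-side configuration is shown below. This is an assumption based on the linked upstream docs rather than part of this repository, and the API group version in particular depends on your Kubernetes release:

```yaml
# kube-scheduler configuration enabling bin-packing scores (illustrative)
apiVersion: kubescheduler.config.k8s.io/v1beta3
kind: KubeSchedulerConfiguration
profiles:
- schedulerName: default-scheduler
  pluginConfig:
  - name: NodeResourcesFit
    args:
      scoringStrategy:
        type: MostAllocated   # prefer nodes that are already heavily allocated
        resources:
        - name: cpu
          weight: 1
        - name: memory
          weight: 1
```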
+
+### Autoheal Node Problems
+
+Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
+[Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
+[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove
+Nodes which have problems. Node Problem Detector can detect specific Node problems and report them to the API server.
+The node controller has a feature called TaintNodeByCondition that takes certain conditions and turns them into taints. Currently, this only works for the default node conditions: PIDPressure, MemoryPressure, DiskPressure, Ready, and some cloud provider specific conditions.
+The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
+allocation falls below the Cluster Autoscaler's scale down threshold, the Node will become a scale down candidate
+and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems; the descheduler side of the loop is sketched below.
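A minimal sketch of the descheduler policy for this loop, enabling only the taint-violation strategy named above (other options such as `nodeFit` are omitted for brevity):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  # evict pods from nodes whose problem conditions have been turned into taints
  "RemovePodsViolatingNodeTaints":
    enabled: true
```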
+
+---
+**NOTE**
+
+Once [kubernetes/node-problem-detector#565](https://github.com/kubernetes/node-problem-detector/pull/565) is available in NPD, we need to update this section.
+
+---
examples/failed-pods.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "RemoveFailedPods":
+    enabled: true
+    params:
+      failedPods:
+        reasons:
+        - "OutOfcpu"
+        - "CreateContainerConfigError"
+        includingInitContainers: true
+        excludeOwnerKinds:
+        - "Job"
+        minPodLifetimeSeconds: 3600 # 1 hour
examples/high-node-utilization.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "HighNodeUtilization":
+    enabled: true
+    params:
+      nodeResourceUtilizationThresholds:
+        thresholds:
+          "memory": 20
examples/low-node-utilization.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "LowNodeUtilization":
+    enabled: true
+    params:
+      nodeResourceUtilizationThresholds:
+        thresholds:
+          "memory": 20
+        targetThresholds:
+          "memory": 70
@@ -1,20 +1,11 @@
----
 apiVersion: "descheduler/v1alpha1"
 kind: "DeschedulerPolicy"
 strategies:
-  "LowNodeUtilization":
-     enabled: false
-  "RemoveDuplicates":
-     enabled: false
-  "RemovePodsViolatingInterPodAntiAffinity":
-     enabled: false
-  "RemovePodsViolatingNodeAffinity":
-     enabled: false
-  "RemovePodsViolatingNodeTaints":
-     enabled: false
-  "RemovePodsHavingTooManyRestarts":
-     enabled: false
   "PodLifeTime":
     enabled: true
     params:
-      maxPodLifeTimeSeconds: 604800 # 7 days
+      podLifeTime:
+        maxPodLifeTimeSeconds: 604800 # 7 days
+        states:
+        - "Pending"
+        - "PodInitializing"
@@ -23,3 +23,7 @@ strategies:
       podsHavingTooManyRestarts:
         podRestartThreshold: 100
         includingInitContainers: true
+  "RemovePodsViolatingTopologySpreadConstraint":
+    enabled: true
+    params:
+      includeSoftConstraints: true
examples/topology-spread-constraint.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "RemovePodsViolatingTopologySpreadConstraint":
+    enabled: true
+    params:
+      nodeFit: true
+      includeSoftConstraints: true # Include 'ScheduleAnyways' constraints
113
go.mod
113
go.mod
@@ -1,15 +1,110 @@
 module sigs.k8s.io/descheduler
 
-go 1.14
+go 1.19
 
 require (
-	github.com/spf13/cobra v0.0.5
+	github.com/client9/misspell v0.3.4
+	github.com/google/go-cmp v0.5.9
+	github.com/spf13/cobra v1.6.0
 	github.com/spf13/pflag v1.0.5
-	k8s.io/api v0.18.4
-	k8s.io/apimachinery v0.18.4
-	k8s.io/apiserver v0.18.4
-	k8s.io/client-go v0.18.4
-	k8s.io/code-generator v0.18.4
-	k8s.io/component-base v0.18.4
-	k8s.io/klog/v2 v2.0.0
+	k8s.io/api v0.26.0
+	k8s.io/apimachinery v0.26.0
+	k8s.io/apiserver v0.26.0
+	k8s.io/client-go v0.26.0
+	k8s.io/code-generator v0.26.0
+	k8s.io/component-base v0.26.0
+	k8s.io/component-helpers v0.26.0
+	k8s.io/klog/v2 v2.80.1
+	k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
+	sigs.k8s.io/mdtoc v1.0.1
+)
+
+require (
+	github.com/BurntSushi/toml v0.3.1 // indirect
+	github.com/NYTimes/gziphandler v1.1.1 // indirect
+	github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/blang/semver/v4 v4.0.0 // indirect
+	github.com/cenkalti/backoff/v4 v4.1.3 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/coreos/go-semver v0.3.0 // indirect
+	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
+	github.com/felixge/httpsnoop v1.0.3 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-logr/zapr v1.2.3 // indirect
+	github.com/go-openapi/jsonpointer v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.20.0 // indirect
+	github.com/go-openapi/swag v0.19.14 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
+	github.com/google/cel-go v0.12.5 // indirect
+	github.com/google/gnostic v0.5.7-v3refs // indirect
+	github.com/google/gofuzz v1.1.0 // indirect
+	github.com/google/uuid v1.1.2 // indirect
+	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
+	github.com/imdario/mergo v0.3.6 // indirect
+	github.com/inconshreveable/mousetrap v1.0.1 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/mailru/easyjson v0.7.6 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
+	github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/prometheus/client_golang v1.14.0 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/stoewer/go-strcase v1.2.0 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.5 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.5 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
+	go.opentelemetry.io/otel v1.10.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
+	go.opentelemetry.io/otel/metric v0.31.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.10.0 // indirect
+	go.opentelemetry.io/otel/trace v1.10.0 // indirect
+	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/multierr v1.6.0 // indirect
+	go.uber.org/zap v1.19.0 // indirect
+	golang.org/x/crypto v0.1.0 // indirect
+	golang.org/x/mod v0.6.0 // indirect
+	golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
+	golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
+	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
+	golang.org/x/sys v0.3.0 // indirect
+	golang.org/x/term v0.3.0 // indirect
+	golang.org/x/text v0.5.0 // indirect
+	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
+	golang.org/x/tools v0.2.0 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
+	google.golang.org/grpc v1.49.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
+	k8s.io/kms v0.26.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 // indirect
+	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
+	sigs.k8s.io/yaml v1.3.0 // indirect
 )
843 go.sum
@@ -1,381 +1,766 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
+github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
+github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
+github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
-github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
+github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
+github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
+github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 h1:LP/6EfrZ/LyCc+SXvANDrIJ4sP9u2NAtqyv6QknetNQ=
+github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/cel-go v0.12.5 h1:DmzaiSgoaqGCjtpPQWl26/gND+yRpim56H1jCVev6d8=
+github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
+github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
+github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
-github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o=
-github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
+github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
+github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
+github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
+github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI=
+github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
|
||||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
|
||||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
|
||||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
|
||||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
|
||||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
|
||||||
|
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
|
||||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
|
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
||||||
|
go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0=
|
||||||
|
go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
|
||||||
|
go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8=
|
||||||
|
go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
|
||||||
|
go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI=
|
||||||
|
go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI=
|
||||||
|
go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c=
|
||||||
|
go.etcd.io/etcd/pkg/v3 v3.5.5 h1:Ablg7T7OkR+AeeeU32kdVhw/AGDsitkKPl7aW73ssjU=
|
||||||
|
go.etcd.io/etcd/raft/v3 v3.5.5 h1:Ibz6XyZ60OYyRopu73lLM/P+qco3YtlZMOhnXNS051I=
|
||||||
|
go.etcd.io/etcd/server/v3 v3.5.5 h1:jNjYm/9s+f9A9r6+SC4RvNaz6AqixpOvhrFdT0PvIj0=
|
||||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 h1:Ajldaqhxqw/gNzQA45IKFWLdG7jZuXX/wBW1d5qvbUI=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
|
||||||
|
go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
|
||||||
|
go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
|
||||||
|
go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs=
|
||||||
|
go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
|
||||||
|
go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY=
|
||||||
|
go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
|
||||||
|
go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E=
|
||||||
|
go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
|
||||||
|
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||||
|
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||||
|
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||||
|
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||||
|
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||||
|
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||||
|
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
|
||||||
|
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||||
|
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||||
|
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||||
|
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
|
||||||
|
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||||
|
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
|
||||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
|
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
||||||
|
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
|
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||||
|
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||||
|
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||||
|
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||||
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||||
|
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||||
|
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||||
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
|
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
|
||||||
|
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
|
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||||
|
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
|
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
|
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc=
|
||||||
|
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
|
||||||
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
|
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||||
|
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
|
||||||
|
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||||
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
|
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
|
||||||
|
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
|
||||||
|
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 h1:bw9doJza/SFBEweII/rHQh338oozWyiFsBRHtrflcws=
|
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||||
|
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||||
|
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||||
|
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
|
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
|
||||||
|
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
|
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||||
|
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
|
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
|
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||||
|
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
|
||||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||||
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||||
|
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||||
|
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||||
|
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||||
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
|
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||||
|
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||||
|
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
|
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.18.4 h1:8x49nBRxuXGUlDlwlWd3RMY1SayZrzFfxea3UZSkFw4=
k8s.io/api v0.18.4/go.mod h1:lOIQAKYgai1+vz9J7YcDZwC26Z0zQewYOGWdyIPUUQ4=
k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I=
k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
k8s.io/apimachinery v0.18.4 h1:ST2beySjhqwJoIFk6p7Hp5v5O0hYY6Gngq/gUYXTPIA=
k8s.io/apimachinery v0.18.4/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg=
k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
k8s.io/apiserver v0.18.4 h1:pn1jSQkfboPSirZopkVpEdLW4FcQLnYMaIY8LFxxj30=
k8s.io/apiserver v0.18.4/go.mod h1:q+zoFct5ABNnYkGIaGQ3bcbUNdmPyOCoEBcg51LChY8=
k8s.io/apiserver v0.26.0 h1:q+LqIK5EZwdznGZb8bq0+a+vCqdeEEe4Ux3zsOjbc4o=
k8s.io/apiserver v0.26.0/go.mod h1:aWhlLD+mU+xRo+zhkvP/gFNbShI4wBDHS33o0+JGI84=
k8s.io/client-go v0.18.4 h1:un55V1Q/B3JO3A76eS0kUSywgGK/WR3BQ8fHQjNa6Zc=
k8s.io/client-go v0.18.4/go.mod h1:f5sXwL4yAZRkAtzOxRWUhA/N8XzGCb+nPZI8PfobZ9g=
k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8=
k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg=
k8s.io/code-generator v0.18.4 h1:SouAMfh3jbL7aL8rnUQ/C+7WwXYTZnPa8L9V2TtIE7o=
k8s.io/code-generator v0.18.4/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/code-generator v0.26.0 h1:ZDY+7Gic9p/lACgD1G72gQg2CvNGeAYZTPIncv+iALM=
k8s.io/code-generator v0.26.0/go.mod h1:OMoJ5Dqx1wgaQzKgc+ZWaZPfGjdRq/Y3WubFrZmeI3I=
k8s.io/component-base v0.18.4 h1:Kr53Fp1iCGNsl9Uv4VcRvLy7YyIqi9oaJOQ7SXtKI98=
k8s.io/component-base v0.18.4/go.mod h1:7jr/Ef5PGmKwQhyAz/pjByxJbC58mhKAhiaDu0vXfPk=
k8s.io/component-base v0.26.0 h1:0IkChOCohtDHttmKuz+EP3j3+qKmV55rM9gIFTXA7Vs=
k8s.io/component-base v0.26.0/go.mod h1:lqHwlfV1/haa14F/Z5Zizk5QmzaVf23nQzCwVOQpfC8=
k8s.io/component-helpers v0.26.0 h1:KNgwqs3EUdK0HLfW4GhnbD+q/Zl9U021VfIU7qoVYFk=
k8s.io/component-helpers v0.26.0/go.mod h1:jHN01qS/Jdj95WCbTe9S2VZ9yxpxXNY488WjF+yW4fo=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kms v0.26.0 h1:5+GOQLvUajSd0z5ODF52RzB2rHo1HJUSYsVC3Ri3VgI=
k8s.io/kms v0.26.0/go.mod h1:ReC1IEGuxgfN+PDCIpR6w8+XMmDE7uJhxcCwMZFdIYc=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 h1:LYqFq+6Cj2D0gFfrJvL7iElD4ET6ir3VDdhDdTK7rgc=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/mdtoc v1.0.1 h1:6ECKhQnbetwZBR6R2IeT2LH+1w+2Zsip0iXjikgaXIk=
sigs.k8s.io/mdtoc v1.0.1/go.mod h1:COYBtOjsaCg7o7SC4eaLwEXPuVRSuiVuLLRrHd7kShw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
hack/.spelling_failures (new file, 6 lines)
@@ -0,0 +1,6 @@
+BUILD
+CHANGELOG
+OWNERS
+go.mod
+go.sum
+vendor/
@@ -18,15 +18,15 @@ E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
 create_cluster() {
   echo "#################### Creating instances ##########################"
-  gcloud compute instances create descheduler-$master_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
+  gcloud compute instances create descheduler-$master_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
   # Keeping the --zone here so as to make sure that e2e's can run locally.
   echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh

-  gcloud compute instances create descheduler-$node1_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
+  gcloud compute instances create descheduler-$node1_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
   echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh

-  gcloud compute instances create descheduler-$node2_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
+  gcloud compute instances create descheduler-$node2_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
-  echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
+  echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-c --quiet" >> $E2E_GCE_HOME/delete_cluster.sh

   # Delete the firewall port created for master.
   echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
@@ -44,10 +44,10 @@ generate_kubeadm_instance_files() {

 transfer_install_files() {
   gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
   gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
   gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
-  gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
+  gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
 }

@@ -55,7 +55,7 @@ install_kube() {
   # Docker installation.
   gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
   gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
-  gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
+  gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-c
   # kubeadm installation.
   # 1. Transfer files to master, nodes.
   transfer_install_files
@@ -81,10 +81,9 @@ install_kube() {
   gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
   gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b

-  gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
+  gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-c
-  gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
+  gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
-  gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
+  gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-c
 }
@@ -1,6 +1,18 @@
 kind: Cluster
-apiVersion: kind.sigs.k8s.io/v1alpha3
+apiVersion: kind.x-k8s.io/v1alpha4
 nodes:
 - role: control-plane
 - role: worker
+  kubeadmConfigPatches:
+  - |
+    kind: JoinConfiguration
+    nodeRegistration:
+      kubeletExtraArgs:
+        node-labels: "topology.kubernetes.io/zone=local-a"
 - role: worker
+  kubeadmConfigPatches:
+  - |
+    kind: JoinConfiguration
+    nodeRegistration:
+      kubeletExtraArgs:
+        node-labels: "topology.kubernetes.io/zone=local-b"
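The two kubeadm patches above give each kind worker a distinct topology.kubernetes.io/zone label, which zone-aware e2e scenarios (such as topology spread) depend on. As a sanity check, a minimal client-go sketch — an illustrative snippet under assumed defaults, not part of this change — that groups nodes by that label:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig at the default location (~/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	// Group node names by the zone label set via kubeletExtraArgs above.
	byZone := map[string][]string{}
	for _, node := range nodes.Items {
		zone := node.Labels["topology.kubernetes.io/zone"]
		byZone[zone] = append(byZone[zone], node.Name)
	}
	fmt.Println(byZone) // expect "local-a" and "local-b" buckets for the two workers
}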
hack/lib/generator-help.sh (new file, 25 lines)
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# Utility command based on 'find' command. The pipeline is as following:
+# 1. find all the go files; (exclude specific path: vendor etc)
+# 2. find all the files containing specific tags in contents;
+# 3. extract related dirs;
+# 4. remove duplicated paths;
+# 5. merge all dirs in array with delimiter ,;
+#
+# Example:
+# find_dirs_containing_comment_tags("+k8s:")
+# Return:
+# sigs.k8s.io/descheduler/a,sigs.k8s.io/descheduler/b,sigs.k8s.io/descheduler/c
+function find_dirs_containing_comment_tags() {
+  array=()
+  while IFS='' read -r line; do array+=("$line"); done < <( \
+    find . -type f -name \*.go -not -path "./vendor/*" -not -path "./_tmp/*" -print0 \
+    | xargs -0 grep --color=never -l "$@" \
+    | xargs -n1 dirname \
+    | LC_ALL=C sort -u \
+  )
+
+  IFS=",";
+  printf '%s' "${array[*]}";
+}
@@ -1,3 +1,4 @@
+//go:build tools
 // +build tools

 /*
@@ -19,4 +20,8 @@ limitations under the License.
 // This package imports things required by build scripts, to force `go mod` to see them as dependencies
 package tools

-import _ "k8s.io/code-generator"
+import (
+	_ "github.com/client9/misspell/cmd/misspell"
+	_ "k8s.io/code-generator"
+	_ "sigs.k8s.io/mdtoc"
+)
@@ -1,9 +1,10 @@
 #!/bin/bash
 source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"

 go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"

 ${OS_OUTPUT_BINPATH}/conversion-gen \
   --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
+  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")" \
   --output-file-base zz_generated.conversion
@@ -1,10 +1,11 @@
 #!/bin/bash
 source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"

 go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"

 ${OS_OUTPUT_BINPATH}/deepcopy-gen \
   --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig,${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api,${PRJ_PREFIX}/pkg/api/v1alpha1" \
+  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")" \
   --output-file-base zz_generated.deepcopy
@@ -1,10 +1,11 @@
 #!/bin/bash
 source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"

 go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"

 ${OS_OUTPUT_BINPATH}/defaulter-gen \
   --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
+  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")" \
   --extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
   --output-file-base zz_generated.defaults
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

 GO_VERSION=($(go version))

-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13|go1.14') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi
hack/update-toc.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+
+go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
+
+${OS_OUTPUT_BINPATH}/mdtoc --inplace README.md
hack/verify-chart.sh (new executable file, 1 line)
@@ -0,0 +1 @@
+${CONTAINER_ENGINE:-docker} run -it --rm --network host --workdir=/data --volume ~/.kube/config:/root/.kube/config:ro --volume $(pwd):/data quay.io/helmpack/chart-testing:v3.7.0 /bin/bash -c "git config --global --add safe.directory /data; ct install --config=.github/ci/ct.yaml --helm-extra-set-args=\"--set=kind=Deployment\""
hack/verify-conversions.sh (new executable file, 31 lines)
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
+mkdir -p "${DESCHEDULER_ROOT}/_tmp"
+_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
+
+_deschedulertmp="${_tmpdir}"
+mkdir -p "${_deschedulertmp}"
+
+git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
+_deschedulertmp="${_deschedulertmp}/descheduler"
+
+pushd "${_deschedulertmp}" > /dev/null 2>&1
+./hack/update-generated-conversions.sh
+popd > /dev/null 2>&1
+
+pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
+if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
+  echo "Generated output differs:" >&2
+  echo "${_out}" >&2
+  echo "Generated conversions verify failed. Please run ./hack/update-generated-conversions.sh"
+  exit 1
+fi
+popd > /dev/null 2>&1
+
+echo "Generated conversions verified."
hack/verify-deep-copies.sh (new executable file, 31 lines)
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
+mkdir -p "${DESCHEDULER_ROOT}/_tmp"
+_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
+
+_deschedulertmp="${_tmpdir}"
+mkdir -p "${_deschedulertmp}"
+
+git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
+_deschedulertmp="${_deschedulertmp}/descheduler"
+
+pushd "${_deschedulertmp}" > /dev/null 2>&1
+./hack/update-generated-deep-copies.sh
+popd > /dev/null 2>&1
+
+pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
+if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
+  echo "Generated deep-copies output differs:" >&2
+  echo "${_out}" >&2
+  echo "Generated deep-copies verify failed. Please run ./hack/update-generated-deep-copies.sh"
+  exit 1
+fi
+popd > /dev/null 2>&1
+
+echo "Generated deep-copies verified."
hack/verify-defaulters.sh (new executable file, 29 lines)
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
+_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
+
+_deschedulertmp="${_tmpdir}"
+mkdir -p "${_deschedulertmp}"
+
+git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
+_deschedulertmp="${_deschedulertmp}/descheduler"
+
+pushd "${_deschedulertmp}" > /dev/null 2>&1
+./hack/update-generated-defaulters.sh
+popd > /dev/null 2>&1
+
+pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
+if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
+  echo "Generated defaulters output differs:" >&2
+  echo "${_out}" >&2
+  echo "Generated defaulters verify failed. Please run ./hack/update-generated-defaulters.sh"
+fi
+popd > /dev/null 2>&1
+
+echo "Generated Defaulters verified."
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

 GO_VERSION=($(go version))

-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13|go1.14') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi
hack/verify-govet.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+
+go vet ${OS_ROOT}/...
hack/verify-spelling.sh (new executable file, 41 lines)
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script checks commonly misspelled English words in all files in the
+# working directory by client9/misspell package.
+# Usage: `hack/verify-spelling.sh`.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+export KUBE_ROOT
+source "${KUBE_ROOT}/hack/lib/init.sh"
+
+# Ensure that we find the binaries we build before anything else.
+export GOBIN="${OS_OUTPUT_BINPATH}"
+PATH="${GOBIN}:${PATH}"
+
+# Install tools we need
+pushd "${KUBE_ROOT}" >/dev/null
+GO111MODULE=on go install github.com/client9/misspell/cmd/misspell
+popd >/dev/null
+
+# Spell checking
+# All the skipping files are defined in hack/.spelling_failures
+skipping_file="${KUBE_ROOT}/hack/.spelling_failures"
+failing_packages=$(sed "s| | -e |g" "${skipping_file}")
+git ls-files | grep -v -e "${failing_packages}" | xargs misspell -i "Creater,creater,ect" -error -o stderr
hack/verify-toc.sh (new executable file, 29 lines)
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
+
+go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
+
+if ! ${OS_OUTPUT_BINPATH}/mdtoc --inplace --dryrun README.md
+then
+  echo "ERROR: Changes detected to table of contents. Run ./hack/update-toc.sh" >&2
+  exit 1
+fi
kubernetes/base/kustomization.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- configmap.yaml
+- rbac.yaml
kubernetes/base/rbac.yaml (new file, 50 lines)
@@ -0,0 +1,50 @@
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: descheduler-cluster-role
+rules:
+- apiGroups: ["events.k8s.io"]
+  resources: ["events"]
+  verbs: ["create", "update"]
+- apiGroups: [""]
+  resources: ["nodes"]
+  verbs: ["get", "watch", "list"]
+- apiGroups: [""]
+  resources: ["namespaces"]
+  verbs: ["get", "watch", "list"]
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get", "watch", "list", "delete"]
+- apiGroups: [""]
+  resources: ["pods/eviction"]
+  verbs: ["create"]
+- apiGroups: ["scheduling.k8s.io"]
+  resources: ["priorityclasses"]
+  verbs: ["get", "watch", "list"]
+- apiGroups: ["coordination.k8s.io"]
+  resources: ["leases"]
+  verbs: ["create"]
+- apiGroups: ["coordination.k8s.io"]
+  resources: ["leases"]
+  resourceNames: ["descheduler"]
+  verbs: ["get", "patch", "delete"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: descheduler-sa
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: descheduler-cluster-role-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: descheduler-cluster-role
+subjects:
+- name: descheduler-sa
+  kind: ServiceAccount
+  namespace: kube-system
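The pods/eviction create rule above is what lets the descheduler remove pods through the eviction API rather than deleting them outright. A minimal client-go sketch of that call — illustrative only; the helper name and the pre-built clientset are assumptions, not code from this change:

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictPod requests an eviction for a pod via the policy/v1 Evictions API;
// the ClusterRole above grants exactly this (create on pods/eviction).
func evictPod(ctx context.Context, client kubernetes.Interface, name, namespace string) error {
	eviction := &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
	}
	// Unlike a plain pod delete, an eviction honors PodDisruptionBudgets.
	return client.PolicyV1().Evictions(namespace).Evict(ctx, eviction)
}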
@@ -1,35 +0,0 @@
----
-apiVersion: batch/v1beta1
-kind: CronJob
-metadata:
-  name: descheduler-cronjob
-  namespace: kube-system
-spec:
-  schedule: "*/2 * * * *"
-  concurrencyPolicy: "Forbid"
-  jobTemplate:
-    spec:
-      template:
-        metadata:
-          name: descheduler-pod
-        spec:
-          priorityClassName: system-cluster-critical
-          containers:
-          - name: descheduler
-            image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.18.0
-            volumeMounts:
-            - mountPath: /policy-dir
-              name: policy-volume
-            command:
-            - "/bin/descheduler"
-            args:
-            - "--policy-config-file"
-            - "/policy-dir/policy.yaml"
-            - "--v"
-            - "3"
-          restartPolicy: "Never"
-          serviceAccountName: descheduler-sa
-          volumes:
-          - name: policy-volume
-            configMap:
-              name: descheduler-policy-configmap
kubernetes/cronjob/cronjob.yaml (new file, 55 lines)
@@ -0,0 +1,55 @@
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: descheduler-cronjob
+  namespace: kube-system
+spec:
+  schedule: "*/2 * * * *"
+  concurrencyPolicy: "Forbid"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          name: descheduler-pod
+        spec:
+          priorityClassName: system-cluster-critical
+          containers:
+          - name: descheduler
+            image: registry.k8s.io/descheduler/descheduler:v0.26.1
+            volumeMounts:
+            - mountPath: /policy-dir
+              name: policy-volume
+            command:
+            - "/bin/descheduler"
+            args:
+            - "--policy-config-file"
+            - "/policy-dir/policy.yaml"
+            - "--v"
+            - "3"
+            resources:
+              requests:
+                cpu: "500m"
+                memory: "256Mi"
+            livenessProbe:
+              failureThreshold: 3
+              httpGet:
+                path: /healthz
+                port: 10258
+                scheme: HTTPS
+              initialDelaySeconds: 3
+              periodSeconds: 10
+            securityContext:
+              allowPrivilegeEscalation: false
+              capabilities:
+                drop:
+                - ALL
+              privileged: false
+              readOnlyRootFilesystem: true
+              runAsNonRoot: true
+          restartPolicy: "Never"
+          serviceAccountName: descheduler-sa
+          volumes:
+          - name: policy-volume
+            configMap:
+              name: descheduler-policy-configmap
kubernetes/cronjob/kustomization.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../base
+- cronjob.yaml
kubernetes/deployment/deployment.yaml (new file, 62 lines)
@@ -0,0 +1,62 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: descheduler
+  namespace: kube-system
+  labels:
+    app: descheduler
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: descheduler
+  template:
+    metadata:
+      labels:
+        app: descheduler
+    spec:
+      priorityClassName: system-cluster-critical
+      serviceAccountName: descheduler-sa
+      containers:
+      - name: descheduler
+        image: registry.k8s.io/descheduler/descheduler:v0.26.1
+        imagePullPolicy: IfNotPresent
+        command:
+        - "/bin/descheduler"
+        args:
+        - "--policy-config-file"
+        - "/policy-dir/policy.yaml"
+        - "--descheduling-interval"
+        - "5m"
+        - "--v"
+        - "3"
+        ports:
+        - containerPort: 10258
+          protocol: TCP
+        livenessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /healthz
+            port: 10258
+            scheme: HTTPS
+          initialDelaySeconds: 3
+          periodSeconds: 10
+        resources:
+          requests:
+            cpu: 500m
+            memory: 256Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+          privileged: false
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+        volumeMounts:
+        - mountPath: /policy-dir
+          name: policy-volume
+      volumes:
+      - name: policy-volume
+        configMap:
+          name: descheduler-policy-configmap
kubernetes/deployment/kustomization.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../base
+- deployment.yaml
@@ -1,33 +0,0 @@
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: descheduler-job
-  namespace: kube-system
-spec:
-  parallelism: 1
-  completions: 1
-  template:
-    metadata:
-      name: descheduler-pod
-    spec:
-      priorityClassName: system-cluster-critical
-      containers:
-      - name: descheduler
-        image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.18.0
-        volumeMounts:
-        - mountPath: /policy-dir
-          name: policy-volume
-        command:
-        - "/bin/descheduler"
-        args:
-        - "--policy-config-file"
-        - "/policy-dir/policy.yaml"
-        - "--v"
-        - "3"
-      restartPolicy: "Never"
-      serviceAccountName: descheduler-sa
-      volumes:
-      - name: policy-volume
-        configMap:
-          name: descheduler-policy-configmap
kubernetes/job/job.yaml (new file, 53 lines)
@@ -0,0 +1,53 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: descheduler-job
+  namespace: kube-system
+spec:
+  parallelism: 1
+  completions: 1
+  template:
+    metadata:
+      name: descheduler-pod
+    spec:
+      priorityClassName: system-cluster-critical
+      containers:
+      - name: descheduler
+        image: registry.k8s.io/descheduler/descheduler:v0.26.1
+        volumeMounts:
+        - mountPath: /policy-dir
+          name: policy-volume
+        command:
+        - "/bin/descheduler"
+        args:
+        - "--policy-config-file"
+        - "/policy-dir/policy.yaml"
+        - "--v"
+        - "3"
+        resources:
+          requests:
+            cpu: "500m"
+            memory: "256Mi"
+        livenessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /healthz
+            port: 10258
+            scheme: HTTPS
+          initialDelaySeconds: 3
+          periodSeconds: 10
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+          privileged: false
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+      restartPolicy: "Never"
+      serviceAccountName: descheduler-sa
+      volumes:
+      - name: policy-volume
+        configMap:
+          name: descheduler-policy-configmap
kubernetes/job/kustomization.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../base
+- job.yaml
@@ -1,40 +0,0 @@
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: descheduler-cluster-role
-  namespace: kube-system
-rules:
-- apiGroups: [""]
-  resources: ["events"]
-  verbs: ["create", "update"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["get", "watch", "list"]
-- apiGroups: [""]
-  resources: ["pods"]
-  verbs: ["get", "watch", "list", "delete"]
-- apiGroups: [""]
-  resources: ["pods/eviction"]
-  verbs: ["create"]
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: descheduler-sa
-  namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: descheduler-cluster-role-binding
-  namespace: kube-system
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: descheduler-cluster-role
-subjects:
-- name: descheduler-sa
-  kind: ServiceAccount
-  namespace: kube-system
-
metrics/metrics.go (new file, 72 lines)
@@ -0,0 +1,72 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"sync"
+
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+
+	"sigs.k8s.io/descheduler/pkg/version"
+)
+
+const (
+	// DeschedulerSubsystem - subsystem name used by descheduler
+	DeschedulerSubsystem = "descheduler"
+)
+
+var (
+	PodsEvicted = metrics.NewCounterVec(
+		&metrics.CounterOpts{
+			Subsystem:      DeschedulerSubsystem,
+			Name:           "pods_evicted",
+			Help:           "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
+			StabilityLevel: metrics.ALPHA,
+		}, []string{"result", "strategy", "namespace", "node"})
+
+	buildInfo = metrics.NewGauge(
+		&metrics.GaugeOpts{
+			Subsystem:      DeschedulerSubsystem,
+			Name:           "build_info",
+			Help:           "Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch",
+			ConstLabels:    map[string]string{"GoVersion": version.Get().GoVersion, "AppVersion": version.Get().Major + "." + version.Get().Minor, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1},
+			StabilityLevel: metrics.ALPHA,
+		},
+	)
+
+	metricsList = []metrics.Registerable{
+		PodsEvicted,
+		buildInfo,
+	}
+)
+
+var registerMetrics sync.Once
+
+// Register all metrics.
+func Register() {
+	// Register the metrics.
+	registerMetrics.Do(func() {
+		RegisterMetrics(metricsList...)
+	})
+}
+
+// RegisterMetrics registers a list of metrics.
+func RegisterMetrics(extraMetrics ...metrics.Registerable) {
+	for _, metric := range extraMetrics {
+		legacyregistry.MustRegister(metric)
+	}
+}
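For orientation, a hypothetical call site (not part of this commit) showing how the package is meant to be used: Register() can be invoked from several places because of the sync.Once guard, and evictions are then recorded against the four labels declared on the CounterVec:

package main

import (
	deschedulermetrics "sigs.k8s.io/descheduler/metrics"
)

func main() {
	// Safe to call more than once; sync.Once makes registration idempotent.
	deschedulermetrics.Register()

	// Label order follows the CounterVec definition: result, strategy, namespace, node.
	deschedulermetrics.PodsEvicted.WithLabelValues("success", "LowNodeUtilization", "default", "node-1").Inc()
}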
@@ -19,8 +19,6 @@ package api
 import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-
-	"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
 )

 var (
@@ -35,12 +33,6 @@ const GroupName = "descheduler"
 // SchemeGroupVersion is group version used to register these objects
 var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

-func init() {
-	if err := addKnownTypes(scheme.Scheme); err != nil {
-		panic(err)
-	}
-}
-
 // Kind takes an unqualified kind and returns a Group qualified GroupKind
 func Kind(kind string) schema.GroupKind {
 	return SchemeGroupVersion.WithKind(kind).GroupKind()
pkg/api/sort.go (new file, 23 lines)
@@ -0,0 +1,23 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import "sort"
+
+func SortProfilesByName(profiles []Profile) []Profile {
+	sort.Slice(profiles, func(i, j int) bool {
+		return profiles[i].Name < profiles[j].Name
+	})
+	return profiles
+}
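A quick illustrative use (hypothetical snippet, not from this diff): sorting in place before iterating gives a deterministic execution order no matter how the policy file listed the profiles:

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
)

func main() {
	// Hypothetical profile names; SortProfilesByName sorts in place and returns the slice.
	profiles := api.SortProfilesByName([]api.Profile{
		{Name: "node-utilization"},
		{Name: "affinity"},
	})
	fmt.Println(profiles[0].Name) // "affinity" — order is now stable
}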
@@ -19,6 +19,7 @@ package api
 import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -26,47 +27,58 @@ import (
 type DeschedulerPolicy struct {
 	metav1.TypeMeta

-	// Strategies
-	Strategies StrategyList
+	// Profiles
+	Profiles []Profile
+
+	// NodeSelector for a set of nodes to operate over
+	NodeSelector *string
+
+	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
+	MaxNoOfPodsToEvictPerNode *uint
+
+	// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
+	MaxNoOfPodsToEvictPerNamespace *uint
 }

-type StrategyName string
-type StrategyList map[StrategyName]DeschedulerStrategy
-
-type DeschedulerStrategy struct {
-	// Enabled or disabled
-	Enabled bool
-
-	// Weight
-	Weight int
-
-	// Strategy parameters
-	Params *StrategyParameters
+// Namespaces carries a list of included/excluded namespaces
+// for which a given strategy is applicable
+type Namespaces struct {
+	Include []string
+	Exclude []string
 }

-// Only one of its members may be specified
-type StrategyParameters struct {
-	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds
-	NodeAffinityType []string
-	PodsHavingTooManyRestarts *PodsHavingTooManyRestarts
-	MaxPodLifeTimeSeconds *uint
-	RemoveDuplicates *RemoveDuplicates
-}
+type (
+	Percentage float64
+	ResourceThresholds map[v1.ResourceName]Percentage
+)

-type Percentage float64
-type ResourceThresholds map[v1.ResourceName]Percentage
+type PriorityThreshold struct {
+	Value *int32
+	Name string
+}

-type NodeResourceUtilizationThresholds struct {
-	Thresholds ResourceThresholds
-	TargetThresholds ResourceThresholds
-	NumberOfNodes int
-}
+type Profile struct {
+	Name string
+	PluginConfigs []PluginConfig
+	Plugins Plugins
+}

-type PodsHavingTooManyRestarts struct {
-	PodRestartThreshold int32
-	IncludingInitContainers bool
-}
+type PluginConfig struct {
+	Name string
+	Args runtime.Object
+}

-type RemoveDuplicates struct {
-	ExcludeOwnerKinds []string
-}
+type Plugins struct {
+	PreSort PluginSet
+	Sort PluginSet
+	Deschedule PluginSet
+	Balance PluginSet
+	Evict PluginSet
+	Filter PluginSet
+	PreEvictionFilter PluginSet
+}
+
+type PluginSet struct {
+	Enabled []string
+	Disabled []string
+}
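To make the shape of the new internal API concrete, a hypothetical construction of a policy object (the plugin name is a placeholder, not taken from this diff):

package main

import (
	"sigs.k8s.io/descheduler/pkg/api"
)

func main() {
	// One profile that enables a single balance plugin and passes it an
	// optional plugin-specific Args object via PluginConfigs.
	_ = api.DeschedulerPolicy{
		Profiles: []api.Profile{
			{
				Name: "default",
				Plugins: api.Plugins{
					Balance: api.PluginSet{Enabled: []string{"ExamplePlugin"}},
				},
				PluginConfigs: []api.PluginConfig{
					{Name: "ExamplePlugin", Args: nil}, // Args is a plugin-specific runtime.Object
				},
			},
		},
	}
}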
@@ -15,7 +15,6 @@ limitations under the License.
 */

 // +k8s:deepcopy-gen=package,register
-// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/api
 // +k8s:defaulter-gen=TypeMeta

 // Package v1alpha1 is the v1alpha1 version of the descheduler API
@@ -28,8 +28,10 @@ var (
 )

 // GroupName is the group name used in this package
-const GroupName = "descheduler"
-const GroupVersion = "v1alpha1"
+const (
+	GroupName = "descheduler"
+	GroupVersion = "v1alpha1"
+)

 // SchemeGroupVersion is group version used to register these objects
 var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
```diff
@@ -28,10 +28,33 @@ type DeschedulerPolicy struct {

 	// Strategies
 	Strategies StrategyList `json:"strategies,omitempty"`
+
+	// NodeSelector for a set of nodes to operate over
+	NodeSelector *string `json:"nodeSelector,omitempty"`
+
+	// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
+	EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`
+
+	// EvictLocalStoragePods allows pods using local storage to be evicted.
+	EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
+
+	// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
+	EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`
+
+	// IgnorePVCPods prevents pods with PVCs from being evicted.
+	IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
+
+	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
+	MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`
+
+	// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
+	MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
 }

-type StrategyName string
-type StrategyList map[StrategyName]DeschedulerStrategy
+type (
+	StrategyName string
+	StrategyList map[StrategyName]DeschedulerStrategy
+)

 type DeschedulerStrategy struct {
 	// Enabled or disabled
@@ -44,22 +67,41 @@ type DeschedulerStrategy struct {
 	Params *StrategyParameters `json:"params,omitempty"`
 }

-// Only one of its members may be specified
+// Namespaces carries a list of included/excluded namespaces
+// for which a given strategy is applicable.
+type Namespaces struct {
+	Include []string `json:"include"`
+	Exclude []string `json:"exclude"`
+}
+
+// Besides Namespaces ThresholdPriority and ThresholdPriorityClassName only one of its members may be specified
 type StrategyParameters struct {
 	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
 	NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
 	PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
-	MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
+	PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
 	RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
+	FailedPods *FailedPods `json:"failedPods,omitempty"`
+	IncludeSoftConstraints bool `json:"includeSoftConstraints"`
+	Namespaces *Namespaces `json:"namespaces"`
+	ThresholdPriority *int32 `json:"thresholdPriority"`
+	ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
+	LabelSelector *metav1.LabelSelector `json:"labelSelector"`
+	NodeFit bool `json:"nodeFit"`
+	IncludePreferNoSchedule bool `json:"includePreferNoSchedule"`
+	ExcludedTaints []string `json:"excludedTaints,omitempty"`
 }

-type Percentage float64
-type ResourceThresholds map[v1.ResourceName]Percentage
+type (
+	Percentage         float64
+	ResourceThresholds map[v1.ResourceName]Percentage
+)

 type NodeResourceUtilizationThresholds struct {
+	UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
 	Thresholds ResourceThresholds `json:"thresholds,omitempty"`
 	TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
 	NumberOfNodes int `json:"numberOfNodes,omitempty"`
 }

 type PodsHavingTooManyRestarts struct {
@@ -70,3 +112,18 @@ type PodsHavingTooManyRestarts struct {
 type RemoveDuplicates struct {
 	ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
 }
+
+type PodLifeTime struct {
+	MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
+	States []string `json:"states,omitempty"`
+
+	// Deprecated: Use States instead.
+	PodStatusPhases []string `json:"podStatusPhases,omitempty"`
+}
+
+type FailedPods struct {
+	ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
+	MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds,omitempty"`
+	Reasons []string `json:"reasons,omitempty"`
+	IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
+}
```
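The hunks above replace the single `MaxPodLifeTimeSeconds` knob with a structured `PodLifeTime` parameter and add `FailedPods` filtering. A short sketch of populating the new fields — all concrete values here are illustrative:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
	maxLifetime := uint(86400) // evict pods older than 24h
	minLifetime := uint(3600)  // but leave very young failed pods alone

	params := v1alpha1.StrategyParameters{
		// PodLifeTime replaces the old flat MaxPodLifeTimeSeconds field.
		PodLifeTime: &v1alpha1.PodLifeTime{
			MaxPodLifeTimeSeconds: &maxLifetime,
			States:                []string{"Pending", "PodInitializing"},
		},
		// FailedPods is new in this hunk.
		FailedPods: &v1alpha1.FailedPods{
			MinPodLifetimeSeconds:   &minLifetime,
			Reasons:                 []string{"NodeAffinity"},
			IncludingInitContainers: true,
		},
	}
	fmt.Printf("podLifeTime max: %ds, failedPods reasons: %v\n",
		*params.PodLifeTime.MaxPodLifeTimeSeconds, params.FailedPods.Reasons)
}
```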
```diff
@@ -1,237 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	unsafe "unsafe"
-
-	conversion "k8s.io/apimachinery/pkg/conversion"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	api "sigs.k8s.io/descheduler/pkg/api"
-)
-
-func init() {
-	localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
-	if err := s.AddGeneratedConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*DeschedulerStrategy)(nil), (*api.DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(a.(*DeschedulerStrategy), b.(*api.DeschedulerStrategy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.DeschedulerStrategy)(nil), (*DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(a.(*api.DeschedulerStrategy), b.(*DeschedulerStrategy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*NodeResourceUtilizationThresholds)(nil), (*api.NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(a.(*NodeResourceUtilizationThresholds), b.(*api.NodeResourceUtilizationThresholds), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.NodeResourceUtilizationThresholds)(nil), (*NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(a.(*api.NodeResourceUtilizationThresholds), b.(*NodeResourceUtilizationThresholds), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*PodsHavingTooManyRestarts)(nil), (*api.PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(a.(*PodsHavingTooManyRestarts), b.(*api.PodsHavingTooManyRestarts), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.PodsHavingTooManyRestarts)(nil), (*PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(a.(*api.PodsHavingTooManyRestarts), b.(*PodsHavingTooManyRestarts), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*RemoveDuplicates)(nil), (*api.RemoveDuplicates)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(a.(*RemoveDuplicates), b.(*api.RemoveDuplicates), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.RemoveDuplicates)(nil), (*RemoveDuplicates)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(a.(*api.RemoveDuplicates), b.(*RemoveDuplicates), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*StrategyParameters)(nil), (*api.StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(a.(*StrategyParameters), b.(*api.StrategyParameters), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*api.StrategyParameters)(nil), (*StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(a.(*api.StrategyParameters), b.(*StrategyParameters), scope)
-	}); err != nil {
-		return err
-	}
-	return nil
-}
-
-func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
-	out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
-	return nil
-}
-
-// Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy is an autogenerated conversion function.
-func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
-	return autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in, out, s)
-}
-
-func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
-	out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
-	return nil
-}
-
-// Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy is an autogenerated conversion function.
-func Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
-	return autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in, out, s)
-}
-
-func autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
-	out.Enabled = in.Enabled
-	out.Weight = in.Weight
-	out.Params = (*api.StrategyParameters)(unsafe.Pointer(in.Params))
-	return nil
-}
-
-// Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy is an autogenerated conversion function.
-func Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
-	return autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in, out, s)
-}
-
-func autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
-	out.Enabled = in.Enabled
-	out.Weight = in.Weight
-	out.Params = (*StrategyParameters)(unsafe.Pointer(in.Params))
-	return nil
-}
-
-// Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy is an autogenerated conversion function.
-func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
-	return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
-}
-
-func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
-	out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
-	out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
-	out.NumberOfNodes = in.NumberOfNodes
-	return nil
-}
-
-// Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds is an autogenerated conversion function.
-func Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
-	return autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in, out, s)
-}
-
-func autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
-	out.Thresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
-	out.TargetThresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
-	out.NumberOfNodes = in.NumberOfNodes
-	return nil
-}
-
-// Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds is an autogenerated conversion function.
-func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
-	return autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in, out, s)
-}
-
-func autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
-	out.PodRestartThreshold = in.PodRestartThreshold
-	out.IncludingInitContainers = in.IncludingInitContainers
-	return nil
-}
-
-// Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts is an autogenerated conversion function.
-func Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
-	return autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in, out, s)
-}
-
-func autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
-	out.PodRestartThreshold = in.PodRestartThreshold
-	out.IncludingInitContainers = in.IncludingInitContainers
-	return nil
-}
-
-// Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts is an autogenerated conversion function.
-func Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
-	return autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in, out, s)
-}
-
-func autoConvert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in *RemoveDuplicates, out *api.RemoveDuplicates, s conversion.Scope) error {
-	out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
-	return nil
-}
-
-// Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates is an autogenerated conversion function.
-func Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in *RemoveDuplicates, out *api.RemoveDuplicates, s conversion.Scope) error {
-	return autoConvert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in, out, s)
-}
-
-func autoConvert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in *api.RemoveDuplicates, out *RemoveDuplicates, s conversion.Scope) error {
-	out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
-	return nil
-}
-
-// Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates is an autogenerated conversion function.
-func Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in *api.RemoveDuplicates, out *RemoveDuplicates, s conversion.Scope) error {
-	return autoConvert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in, out, s)
-}
-
-func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
-	out.NodeResourceUtilizationThresholds = (*api.NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
-	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
-	out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
-	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
-	out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
-	return nil
-}
-
-// Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters is an autogenerated conversion function.
-func Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
-	return autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in, out, s)
-}
-
-func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
-	out.NodeResourceUtilizationThresholds = (*NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
-	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
-	out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
-	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
-	out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
-	return nil
-}
-
-// Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters is an autogenerated conversion function.
-func Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
-	return autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in, out, s)
-}
```
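With `Strategies` gone from the internal type, the `unsafe.Pointer` casts in the file above no longer describe layout-identical types, so the generated conversion file is deleted outright rather than regenerated. For readers unfamiliar with the pattern, here is a reduced sketch of scheme-registered conversion — the `Old`/`New` pair is a hypothetical stand-in, not a descheduler type:

```go
package main

import (
	"fmt"

	conversion "k8s.io/apimachinery/pkg/conversion"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// Old and New stand in for a versioned/internal type pair.
type Old struct{ Strategies map[string]bool }
type New struct{ Strategies map[string]bool }

func main() {
	s := runtime.NewScheme()
	// Same shape as the deleted generated code: register a typed closure
	// the scheme can look up when asked to convert *Old into *New.
	if err := s.AddConversionFunc((*Old)(nil), (*New)(nil), func(a, b interface{}, scope conversion.Scope) error {
		b.(*New).Strategies = a.(*Old).Strategies
		return nil
	}); err != nil {
		panic(err)
	}

	in, out := &Old{Strategies: map[string]bool{"PodLifeTime": true}}, &New{}
	if err := s.Convert(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Strategies)
}
```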
```diff
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated

 /*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -21,6 +22,7 @@ limitations under the License.
 package v1alpha1

 import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )

@@ -35,6 +37,41 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 			(*out)[key] = *val.DeepCopy()
 		}
 	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = new(string)
+		**out = **in
+	}
+	if in.EvictFailedBarePods != nil {
+		in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
+		*out = new(bool)
+		**out = **in
+	}
+	if in.EvictLocalStoragePods != nil {
+		in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
+		*out = new(bool)
+		**out = **in
+	}
+	if in.EvictSystemCriticalPods != nil {
+		in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
+		*out = new(bool)
+		**out = **in
+	}
+	if in.IgnorePVCPods != nil {
+		in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MaxNoOfPodsToEvictPerNode != nil {
+		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
+		*out = new(uint)
+		**out = **in
+	}
+	if in.MaxNoOfPodsToEvictPerNamespace != nil {
+		in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
+		*out = new(uint)
+		**out = **in
+	}
 	return
 }

@@ -77,6 +114,63 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FailedPods) DeepCopyInto(out *FailedPods) {
+	*out = *in
+	if in.ExcludeOwnerKinds != nil {
+		in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.MinPodLifetimeSeconds != nil {
+		in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
+		*out = new(uint)
+		**out = **in
+	}
+	if in.Reasons != nil {
+		in, out := &in.Reasons, &out.Reasons
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
+func (in *FailedPods) DeepCopy() *FailedPods {
+	if in == nil {
+		return nil
+	}
+	out := new(FailedPods)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Namespaces) DeepCopyInto(out *Namespaces) {
+	*out = *in
+	if in.Include != nil {
+		in, out := &in.Include, &out.Include
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Exclude != nil {
+		in, out := &in.Exclude, &out.Exclude
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
+func (in *Namespaces) DeepCopy() *Namespaces {
+	if in == nil {
+		return nil
+	}
+	out := new(Namespaces)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
 	*out = *in
@@ -107,6 +201,37 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
+	*out = *in
+	if in.MaxPodLifeTimeSeconds != nil {
+		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
+		*out = new(uint)
+		**out = **in
+	}
+	if in.States != nil {
+		in, out := &in.States, &out.States
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.PodStatusPhases != nil {
+		in, out := &in.PodStatusPhases, &out.PodStatusPhases
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
+func (in *PodLifeTime) DeepCopy() *PodLifeTime {
+	if in == nil {
+		return nil
+	}
+	out := new(PodLifeTime)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
 	*out = *in
@@ -206,16 +331,41 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
 		*out = new(PodsHavingTooManyRestarts)
 		**out = **in
 	}
-	if in.MaxPodLifeTimeSeconds != nil {
-		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
-		*out = new(uint)
-		**out = **in
+	if in.PodLifeTime != nil {
+		in, out := &in.PodLifeTime, &out.PodLifeTime
+		*out = new(PodLifeTime)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.RemoveDuplicates != nil {
 		in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
 		*out = new(RemoveDuplicates)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.FailedPods != nil {
+		in, out := &in.FailedPods, &out.FailedPods
+		*out = new(FailedPods)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = new(Namespaces)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ThresholdPriority != nil {
+		in, out := &in.ThresholdPriority, &out.ThresholdPriority
+		*out = new(int32)
+		**out = **in
+	}
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ExcludedTaints != nil {
+		in, out := &in.ExcludedTaints, &out.ExcludedTaints
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	return
 }

```
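The generated `DeepCopyInto` bodies above repeat one idiom for every pointer field: allocate fresh storage, then copy the pointed-to value. A self-contained stand-in showing why the extra `new`/`**out = **in` step matters — `Config` is hypothetical, not a descheduler type:

```go
package main

import "fmt"

// Config is a tiny stand-in for any API struct with a pointer field.
type Config struct {
	MaxEvictions *uint
}

func (in *Config) DeepCopyInto(out *Config) {
	*out = *in // copies the pointer itself: both structs still share one uint
	if in.MaxEvictions != nil {
		in, out := &in.MaxEvictions, &out.MaxEvictions
		*out = new(uint) // allocate fresh storage ...
		**out = **in     // ... then copy the value, breaking the aliasing
	}
}

func main() {
	n := uint(5)
	orig := Config{MaxEvictions: &n}

	var cp Config
	orig.DeepCopyInto(&cp)

	*cp.MaxEvictions = 10                             // mutate the copy
	fmt.Println(*orig.MaxEvictions, *cp.MaxEvictions) // 5 10: original untouched
}
```

Without the allocation, mutating the copy would silently write through to the original — which is exactly the bug class deepcopy-gen exists to prevent.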
```diff
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated

 /*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
```
```diff
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated

 /*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -28,13 +29,28 @@ import (
 func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
-	if in.Strategies != nil {
-		in, out := &in.Strategies, &out.Strategies
-		*out = make(StrategyList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
+	if in.Profiles != nil {
+		in, out := &in.Profiles, &out.Profiles
+		*out = make([]Profile, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaxNoOfPodsToEvictPerNode != nil {
+		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
+		*out = new(uint)
+		**out = **in
+	}
+	if in.MaxNoOfPodsToEvictPerNamespace != nil {
+		in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
+		*out = new(uint)
+		**out = **in
+	}
 	return
 }

```
```diff
@@ -57,89 +73,140 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
 }

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
+func (in *Namespaces) DeepCopyInto(out *Namespaces) {
 	*out = *in
-	if in.Params != nil {
-		in, out := &in.Params, &out.Params
-		*out = new(StrategyParameters)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
-func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
-	if in == nil {
-		return nil
-	}
-	out := new(DeschedulerStrategy)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
-	*out = *in
-	if in.Thresholds != nil {
-		in, out := &in.Thresholds, &out.Thresholds
-		*out = make(ResourceThresholds, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.TargetThresholds != nil {
-		in, out := &in.TargetThresholds, &out.TargetThresholds
-		*out = make(ResourceThresholds, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
-func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeResourceUtilizationThresholds)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
-func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
-	if in == nil {
-		return nil
-	}
-	out := new(PodsHavingTooManyRestarts)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
-	*out = *in
-	if in.ExcludeOwnerKinds != nil {
-		in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
+	if in.Include != nil {
+		in, out := &in.Include, &out.Include
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Exclude != nil {
+		in, out := &in.Exclude, &out.Exclude
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
 	return
 }

-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
-func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
+func (in *Namespaces) DeepCopy() *Namespaces {
 	if in == nil {
 		return nil
 	}
-	out := new(RemoveDuplicates)
+	out := new(Namespaces)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
+	*out = *in
+	if in.Args != nil {
+		out.Args = in.Args.DeepCopyObject()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig.
+func (in *PluginConfig) DeepCopy() *PluginConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PluginConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PluginSet) DeepCopyInto(out *PluginSet) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet.
+func (in *PluginSet) DeepCopy() *PluginSet {
+	if in == nil {
+		return nil
+	}
+	out := new(PluginSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Plugins) DeepCopyInto(out *Plugins) {
+	*out = *in
+	in.PreSort.DeepCopyInto(&out.PreSort)
+	in.Sort.DeepCopyInto(&out.Sort)
+	in.Deschedule.DeepCopyInto(&out.Deschedule)
+	in.Balance.DeepCopyInto(&out.Balance)
+	in.Evict.DeepCopyInto(&out.Evict)
+	in.Filter.DeepCopyInto(&out.Filter)
+	in.PreEvictionFilter.DeepCopyInto(&out.PreEvictionFilter)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins.
+func (in *Plugins) DeepCopy() *Plugins {
+	if in == nil {
+		return nil
+	}
+	out := new(Plugins)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PriorityThreshold) DeepCopyInto(out *PriorityThreshold) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityThreshold.
+func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
+	if in == nil {
+		return nil
+	}
+	out := new(PriorityThreshold)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Profile) DeepCopyInto(out *Profile) {
+	*out = *in
+	if in.PluginConfigs != nil {
+		in, out := &in.PluginConfigs, &out.PluginConfigs
+		*out = make([]PluginConfig, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Plugins.DeepCopyInto(&out.Plugins)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Profile.
+func (in *Profile) DeepCopy() *Profile {
+	if in == nil {
+		return nil
+	}
+	out := new(Profile)
 	in.DeepCopyInto(out)
 	return out
 }
```
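One field in the new API cannot use the structural-copy idiom at all: `PluginConfig.Args` is a `runtime.Object` interface value, so the generated code above delegates to the value's own `DeepCopyObject`. A reduced illustration — this `PluginConfig` mirrors the shape in the diff but is redeclared locally so the snippet stands alone:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// PluginConfig mirrors the diff above: Args is an interface, so structural
// copying is impossible and we must call the concrete type's DeepCopyObject.
type PluginConfig struct {
	Name string
	Args runtime.Object
}

func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
	*out = *in
	if in.Args != nil {
		out.Args = in.Args.DeepCopyObject() // delegate to the concrete type
	}
}

func main() {
	// Any runtime.Object works as Args; metav1.Status is a convenient one.
	args := &metav1.Status{Message: "original"}
	in := PluginConfig{Name: "PodLifeTime", Args: args}

	var out PluginConfig
	in.DeepCopyInto(&out)

	args.Message = "mutated"
	fmt.Println(out.Args.(*metav1.Status).Message) // still "original"
}
```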
```diff
@@ -165,66 +232,3 @@ func (in ResourceThresholds) DeepCopy() ResourceThresholds {
 	in.DeepCopyInto(out)
 	return *out
 }
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in StrategyList) DeepCopyInto(out *StrategyList) {
-	{
-		in := &in
-		*out = make(StrategyList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-		return
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
-func (in StrategyList) DeepCopy() StrategyList {
-	if in == nil {
-		return nil
-	}
-	out := new(StrategyList)
-	in.DeepCopyInto(out)
-	return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
-	*out = *in
-	if in.NodeResourceUtilizationThresholds != nil {
-		in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
-		*out = new(NodeResourceUtilizationThresholds)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.NodeAffinityType != nil {
-		in, out := &in.NodeAffinityType, &out.NodeAffinityType
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.PodsHavingTooManyRestarts != nil {
-		in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
-		*out = new(PodsHavingTooManyRestarts)
-		**out = **in
-	}
-	if in.MaxPodLifeTimeSeconds != nil {
-		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
-		*out = new(uint)
-		**out = **in
-	}
-	if in.RemoveDuplicates != nil {
-		in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
-		*out = new(RemoveDuplicates)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
-func (in *StrategyParameters) DeepCopy() *StrategyParameters {
-	if in == nil {
-		return nil
-	}
-	out := new(StrategyParameters)
-	in.DeepCopyInto(out)
-	return out
-}
```
```diff
@@ -19,8 +19,6 @@ package componentconfig
 import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-
-	"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
 )

 var (
@@ -34,12 +32,6 @@ const GroupName = "deschedulercomponentconfig"
 // SchemeGroupVersion is group version used to register these objects
 var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

-func init() {
-	if err := addKnownTypes(scheme.Scheme); err != nil {
-		panic(err)
-	}
-}
-
 // Kind takes an unqualified kind and returns a Group qualified GroupKind
 func Kind(kind string) schema.GroupKind {
 	return SchemeGroupVersion.WithKind(kind).GroupKind()
```
```diff
@@ -20,6 +20,8 @@ import (
 	"time"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	componentbaseconfig "k8s.io/component-base/config"
+	registry "k8s.io/component-base/logs/api/v1"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -48,4 +50,14 @@ type DeschedulerConfiguration struct {

 	// EvictLocalStoragePods allows pods using local storage to be evicted.
 	EvictLocalStoragePods bool
+
+	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
+	IgnorePVCPods bool
+
+	// LeaderElection starts Deployment using leader election loop
+	LeaderElection componentbaseconfig.LeaderElectionConfiguration
+
+	// Logging specifies the options of logging.
+	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
+	Logging registry.LoggingConfiguration
 }
```
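The new `LeaderElection` and `Logging` fields reuse the shared component-base configuration types rather than descheduler-specific ones. A hedged sketch of populating them — the struct below is a two-field reduction of `DeschedulerConfiguration`, and the timing values are illustrative:

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	componentbaseconfig "k8s.io/component-base/config"
)

// DeschedulerConfiguration here shows only the fields added in the hunk
// above; the full type lives in the repo.
type DeschedulerConfiguration struct {
	IgnorePVCPods  bool
	LeaderElection componentbaseconfig.LeaderElectionConfiguration
}

func main() {
	cfg := DeschedulerConfiguration{
		IgnorePVCPods: true,
		LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
			LeaderElect:   true,
			ResourceLock:  "leases",
			LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
			RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
			RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
		},
	}
	fmt.Printf("leader election enabled: %v\n", cfg.LeaderElection.LeaderElect)
}
```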
```diff
@@ -28,8 +28,10 @@ var (
 )

 // GroupName is the group name use in this package
-const GroupName = "deschedulercomponentconfig"
-const GroupVersion = "v1alpha1"
+const (
+	GroupName    = "deschedulercomponentconfig"
+	GroupVersion = "v1alpha1"
+)

 // SchemeGroupVersion is group version used to register these objects
 var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
```
```diff
@@ -20,6 +20,8 @@ import (
 	"time"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	componentbaseconfig "k8s.io/component-base/config"
+	registry "k8s.io/component-base/logs/api/v1"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -48,4 +50,14 @@ type DeschedulerConfiguration struct {

 	// EvictLocalStoragePods allows pods using local storage to be evicted.
 	EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
+
+	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
+	IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
+
+	// LeaderElection starts Deployment using leader election loop
+	LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
+
+	// Logging specifies the options of logging.
+	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
+	Logging registry.LoggingConfiguration `json:"logging,omitempty"`
 }
```
```diff
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated

 /*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -56,6 +57,9 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
 	out.NodeSelector = in.NodeSelector
 	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
 	out.EvictLocalStoragePods = in.EvictLocalStoragePods
+	out.IgnorePVCPods = in.IgnorePVCPods
+	out.LeaderElection = in.LeaderElection
+	out.Logging = in.Logging
 	return nil
 }

@@ -72,6 +76,9 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
 	out.NodeSelector = in.NodeSelector
 	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
 	out.EvictLocalStoragePods = in.EvictLocalStoragePods
+	out.IgnorePVCPods = in.IgnorePVCPods
+	out.LeaderElection = in.LeaderElection
+	out.Logging = in.Logging
 	return nil
 }

```
```diff
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated

 /*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -28,6 +29,8 @@ import (
 func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
+	out.LeaderElection = in.LeaderElection
+	in.Logging.DeepCopyInto(&out.Logging)
 	return
 }

```
```diff
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated

 /*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
```
```diff
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated

 /*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -28,6 +29,8 @@ import (
 func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
+	out.LeaderElection = in.LeaderElection
+	in.Logging.DeepCopyInto(&out.Logging)
 	return
 }

```
```diff
@@ -26,7 +26,7 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 )

-func CreateClient(kubeconfig string) (clientset.Interface, error) {
+func CreateClient(kubeconfig, userAgt string) (clientset.Interface, error) {
 	var cfg *rest.Config
 	if len(kubeconfig) != 0 {
 		master, err := GetMasterFromKubeconfig(kubeconfig)
@@ -47,7 +47,11 @@ func CreateClient(kubeconfig string) (clientset.Interface, error) {
 		}
 	}

-	return clientset.NewForConfig(cfg)
+	if len(userAgt) != 0 {
+		return clientset.NewForConfig(rest.AddUserAgent(cfg, userAgt))
+	} else {
+		return clientset.NewForConfig(cfg)
+	}
 }

 func GetMasterFromKubeconfig(filename string) (string, error) {
```
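The extra parameter threads a User-Agent string into the rest config via `rest.AddUserAgent`, so descheduler traffic can be told apart in apiserver logs and audit records; an empty string keeps the old behavior. A usage sketch — the "descheduler" agent string is illustrative, callers pass whatever identifier they want logged:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/descheduler/client"
)

func main() {
	// An empty kubeconfig path exercises the non-kubeconfig branch of
	// CreateClient; the second argument is the User-Agent to stamp on
	// every request this clientset makes.
	cs, err := client.CreateClient("", "descheduler")
	if err != nil {
		fmt.Println("no cluster available:", err)
		return
	}
	fmt.Printf("created clientset: %T\n", cs)
}
```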
```diff
@@ -20,30 +20,46 @@ import (
 	"context"
 	"fmt"

-	"k8s.io/api/core/v1"
-	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"

+	v1 "k8s.io/api/core/v1"
+	policy "k8s.io/api/policy/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
+	clientset "k8s.io/client-go/kubernetes"
+	fakeclientset "k8s.io/client-go/kubernetes/fake"
+	listersv1 "k8s.io/client-go/listers/core/v1"
+	schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
+	core "k8s.io/client-go/testing"

 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
+	"sigs.k8s.io/descheduler/metrics"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/client"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
-	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/framework"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )

-func Run(rs *options.DeschedulerServer) error {
-	ctx := context.Background()
-	rsclient, err := client.CreateClient(rs.KubeconfigFile)
+func Run(ctx context.Context, rs *options.DeschedulerServer) error {
+	metrics.Register()
+
+	rsclient, eventClient, err := createClients(rs.KubeconfigFile)
 	if err != nil {
 		return err
 	}
 	rs.Client = rsclient
+	rs.EventClient = eventClient

-	deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile)
+	deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile, rs.Client, pluginbuilder.PluginRegistry)
 	if err != nil {
 		return err
 	}
```
@@ -56,63 +72,369 @@ func Run(rs *options.DeschedulerServer) error {
 		return err
 	}

-	stopChannel := make(chan struct{})
-	return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
-}
-
-type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor)
-
-func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
-	sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
-	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
-
-	sharedInformerFactory.Start(stopChannel)
-	sharedInformerFactory.WaitForCacheSync(stopChannel)
-
-	strategyFuncs := map[string]strategyFunction{
-		"RemoveDuplicates":                        strategies.RemoveDuplicatePods,
-		"LowNodeUtilization":                      strategies.LowNodeUtilization,
-		"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
-		"RemovePodsViolatingNodeAffinity":         strategies.RemovePodsViolatingNodeAffinity,
-		"RemovePodsViolatingNodeTaints":           strategies.RemovePodsViolatingNodeTaints,
-		"RemovePodsHavingTooManyRestarts":         strategies.RemovePodsHavingTooManyRestarts,
-		"PodLifeTime":                             strategies.PodLifeTime,
-	}
-
-	wait.Until(func() {
-		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
+	runFn := func() error {
+		return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
+	}
+
+	if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
+		return fmt.Errorf("leaderElection must be used with deschedulingInterval")
+	}
+
+	if rs.LeaderElection.LeaderElect && rs.DryRun {
+		klog.V(1).InfoS("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
+	}
+
+	if rs.LeaderElection.LeaderElect && !rs.DryRun {
+		if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
+			return fmt.Errorf("leaderElection: %w", err)
+		}
+		return nil
+	}
+
+	return runFn()
+}

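The `runFn` closure above lets the same descheduling loop either run directly or be handed to leader election as the callback to invoke once the lease is acquired. A runnable sketch of that gating shape, with a stubbed `electAndRun` standing in for the `NewLeaderElection` helper added later in this diff:

package main

import (
	"errors"
	"fmt"
)

// electAndRun stands in for a leader-election helper that invokes run
// once leadership is acquired (compare NewLeaderElection below).
func electAndRun(run func() error) error {
	// ... acquire a lease, then:
	return run()
}

func runOnce(leaderElect bool, intervalSeconds int, run func() error) error {
	// Leader election only makes sense for a long-running loop.
	if leaderElect && intervalSeconds == 0 {
		return errors.New("leaderElection must be used with deschedulingInterval")
	}
	if leaderElect {
		return electAndRun(run)
	}
	return run()
}

func main() {
	err := runOnce(true, 0, func() error { fmt.Println("descheduling"); return nil })
	fmt.Println(err) // leaderElection must be used with deschedulingInterval
}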
+func cachedClient(
+	realClient clientset.Interface,
+	podLister listersv1.PodLister,
+	nodeLister listersv1.NodeLister,
+	namespaceLister listersv1.NamespaceLister,
+	priorityClassLister schedulingv1.PriorityClassLister,
+) (clientset.Interface, error) {
+	fakeClient := fakeclientset.NewSimpleClientset()
+	// simulate a pod eviction by deleting a pod
+	fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
+		if action.GetSubresource() == "eviction" {
+			createAct, matched := action.(core.CreateActionImpl)
+			if !matched {
+				return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
+			}
+			eviction, matched := createAct.Object.(*policy.Eviction)
+			if !matched {
+				return false, nil, fmt.Errorf("unable to convert action object into *policy.Eviction")
+			}
+			if err := fakeClient.Tracker().Delete(action.GetResource(), eviction.GetNamespace(), eviction.GetName()); err != nil {
+				return false, nil, fmt.Errorf("unable to delete pod %v/%v: %v", eviction.GetNamespace(), eviction.GetName(), err)
+			}
+			return true, nil, nil
+		}
+		// fallback to the default reactor
+		return false, nil, nil
+	})
+
+	klog.V(3).Infof("Pulling resources for the cached client from the cluster")
+	pods, err := podLister.List(labels.Everything())
+	if err != nil {
+		return nil, fmt.Errorf("unable to list pods: %v", err)
+	}
+
+	for _, item := range pods {
+		if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+			return nil, fmt.Errorf("unable to copy pod: %v", err)
+		}
+	}
+
+	nodes, err := nodeLister.List(labels.Everything())
+	if err != nil {
+		return nil, fmt.Errorf("unable to list nodes: %v", err)
+	}
+
+	for _, item := range nodes {
+		if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+			return nil, fmt.Errorf("unable to copy node: %v", err)
+		}
+	}
+
+	namespaces, err := namespaceLister.List(labels.Everything())
+	if err != nil {
+		return nil, fmt.Errorf("unable to list namespaces: %v", err)
+	}
+
+	for _, item := range namespaces {
+		if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+			return nil, fmt.Errorf("unable to copy node: %v", err)
+		}
+	}
+
+	priorityClasses, err := priorityClassLister.List(labels.Everything())
+	if err != nil {
+		return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
+	}
+
+	for _, item := range priorityClasses {
+		if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+			return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
+		}
+	}
+
+	return fakeClient, nil
+}

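The dry-run trick above snapshots live objects into a client-go fake clientset and rewrites eviction API calls into deletes against the fake's object tracker, so strategies run in a row see each other's evictions without touching the cluster. A self-contained sketch of the same reactor technique:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	fakeClient := fakeclientset.NewSimpleClientset(pod)

	// Turn "create pods/eviction" into a delete against the object tracker,
	// mirroring the cachedClient reactor in this diff.
	fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() != "eviction" {
			return false, nil, nil // fall through to the default reactor
		}
		createAct := action.(core.CreateActionImpl)
		eviction := createAct.Object.(*policy.Eviction)
		err := fakeClient.Tracker().Delete(action.GetResource(), eviction.GetNamespace(), eviction.GetName())
		return true, nil, err
	})

	eviction := &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	if err := fakeClient.PolicyV1().Evictions("default").Evict(context.TODO(), eviction); err != nil {
		panic(err)
	}
	pods, _ := fakeClient.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	fmt.Println(len(pods.Items)) // 0: the "evicted" pod is gone from the fake cluster
}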
+// evictorImpl implements the Evictor interface so plugins
+// can evict a pod without importing a specific pod evictor
+type evictorImpl struct {
+	podEvictor    *evictions.PodEvictor
+	evictorFilter framework.EvictorPlugin
+}
+
+var _ framework.Evictor = &evictorImpl{}
+
+// Filter checks if a pod can be evicted
+func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
+	return ei.evictorFilter.Filter(pod)
+}
+
+// PreEvictionFilter checks if pod can be evicted right before eviction
+func (ei *evictorImpl) PreEvictionFilter(pod *v1.Pod) bool {
+	return ei.evictorFilter.PreEvictionFilter(pod)
+}
+
+// Evict evicts a pod (no pre-check performed)
+func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
+	return ei.podEvictor.EvictPod(ctx, pod, opts)
+}
+
+func (ei *evictorImpl) NodeLimitExceeded(node *v1.Node) bool {
+	return ei.podEvictor.NodeLimitExceeded(node)
+}
+
+// handleImpl implements the framework handle which gets passed to plugins
+type handleImpl struct {
+	clientSet                 clientset.Interface
+	getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
+	sharedInformerFactory     informers.SharedInformerFactory
+	evictor                   *evictorImpl
+}
+
+var _ framework.Handle = &handleImpl{}
+
+// ClientSet retrieves kube client set
+func (hi *handleImpl) ClientSet() clientset.Interface {
+	return hi.clientSet
+}
+
+// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
+func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
+	return hi.getPodsAssignedToNodeFunc
+}
+
+// SharedInformerFactory retrieves shared informer factory
+func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
+	return hi.sharedInformerFactory
+}
+
+// Evictor retrieves evictor so plugins can filter and evict pods
+func (hi *handleImpl) Evictor() framework.Evictor {
+	return hi.evictor
+}
+
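With the `Handle` and `Evictor` interfaces above, a plugin needs nothing beyond the handle to enumerate, filter, and evict pods. A hypothetical plugin sketch against those interfaces (`EvictStalePods` is invented for illustration; it assumes the imports of the file above, and the filter argument to `GetPodsAssignedToNodeFunc` follows the podutil signature assumed here, which is not shown in this diff):

// EvictStalePods is a hypothetical Deschedule plugin, not part of this commit.
type EvictStalePods struct {
	handle framework.Handle
}

func (p *EvictStalePods) Name() string { return "EvictStalePods" }

// Deschedule walks pods per node and lets the evictor apply the
// DefaultEvictor filter plus the node/namespace eviction limits.
func (p *EvictStalePods) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
	for _, node := range nodes {
		if p.handle.Evictor().NodeLimitExceeded(node) {
			continue // per-node eviction budget already spent
		}
		pods, err := p.handle.GetPodsAssignedToNodeFunc()(node.Name, func(pod *v1.Pod) bool { return true })
		if err != nil {
			return &framework.Status{Err: err}
		}
		for _, pod := range pods {
			if p.handle.Evictor().Filter(pod) {
				p.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{Reason: "StalePod"})
			}
		}
	}
	return nil
}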
+func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
+	sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
+	podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
+	podLister := sharedInformerFactory.Core().V1().Pods().Lister()
+	nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
+	namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
+	priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+	if err != nil {
+		return fmt.Errorf("build get pods assigned to node function error: %v", err)
+	}
+
+	sharedInformerFactory.Start(ctx.Done())
+	sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+	var nodeSelector string
+	if deschedulerPolicy.NodeSelector != nil {
+		nodeSelector = *deschedulerPolicy.NodeSelector
+	}
+
+	var eventClient clientset.Interface
+	if rs.DryRun {
+		eventClient = fakeclientset.NewSimpleClientset()
+	} else {
+		eventClient = rs.Client
+	}
+
+	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
+	defer eventBroadcaster.Shutdown()
+
+	wait.NonSlidingUntil(func() {
+		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeLister, nodeSelector)
 		if err != nil {
-			klog.V(1).Infof("Unable to get ready nodes: %v", err)
-			close(stopChannel)
+			klog.V(1).InfoS("Unable to get ready nodes", "err", err)
+			cancel()
 			return
 		}

 		if len(nodes) <= 1 {
-			klog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
-			close(stopChannel)
+			klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
+			cancel()
 			return
 		}

+		var podEvictorClient clientset.Interface
+		// When the dry mode is enable, collect all the relevant objects (mostly pods) under a fake client.
+		// So when evicting pods while running multiple strategies in a row have the cummulative effect
+		// as is when evicting pods for real.
+		if rs.DryRun {
+			klog.V(3).Infof("Building a cached client from the cluster for the dry run")
+			// Create a new cache so we start from scratch without any leftovers
+			fakeClient, err := cachedClient(rs.Client, podLister, nodeLister, namespaceLister, priorityClassLister)
+			if err != nil {
+				klog.Error(err)
+				return
+			}
+
+			fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
+			if err != nil {
+				klog.Errorf("build get pods assigned to node function error: %v", err)
+				return
+			}
+
+			fakeCtx, cncl := context.WithCancel(context.TODO())
+			defer cncl()
+			fakeSharedInformerFactory.Start(fakeCtx.Done())
+			fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
+
+			podEvictorClient = fakeClient
+		} else {
+			podEvictorClient = rs.Client
+		}
+
+		klog.V(3).Infof("Building a pod evictor")
 		podEvictor := evictions.NewPodEvictor(
-			rs.Client,
+			podEvictorClient,
 			evictionPolicyGroupVersion,
 			rs.DryRun,
-			rs.MaxNoOfPodsToEvictPerNode,
+			deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
+			deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
 			nodes,
-			rs.EvictLocalStoragePods,
+			!rs.DisableMetrics,
+			eventRecorder,
 		)

-		for name, f := range strategyFuncs {
-			if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
-				f(ctx, rs.Client, strategy, nodes, podEvictor)
+		var enabledDeschedulePlugins []framework.DeschedulePlugin
+		var enabledBalancePlugins []framework.BalancePlugin
+
+		// Build plugins
+		for _, profile := range deschedulerPolicy.Profiles {
+			pc := getPluginConfig(defaultevictor.PluginName, profile.PluginConfigs)
+			if pc == nil {
+				klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", defaultevictor.PluginName, "profile", profile.Name)
+				continue
+			}
+			evictorFilter, err := defaultevictor.New(
+				pc.Args,
+				&handleImpl{
+					clientSet:                 rs.Client,
+					getPodsAssignedToNodeFunc: getPodsAssignedToNode,
+					sharedInformerFactory:     sharedInformerFactory,
+				},
+			)
+			if err != nil {
+				klog.ErrorS(fmt.Errorf("unable to construct a plugin"), "skipping plugin", "plugin", defaultevictor.PluginName)
+				continue
+			}
+			handle := &handleImpl{
+				clientSet:                 rs.Client,
+				getPodsAssignedToNodeFunc: getPodsAssignedToNode,
+				sharedInformerFactory:     sharedInformerFactory,
+				evictor: &evictorImpl{
+					podEvictor:    podEvictor,
+					evictorFilter: evictorFilter.(framework.EvictorPlugin),
+				},
+			}
+			// Assuming only a list of enabled extension points.
+			// Later, when a default list of plugins and their extension points is established,
+			// compute the list of enabled extension points as (DefaultEnabled + Enabled - Disabled)
+			for _, plugin := range append(profile.Plugins.Deschedule.Enabled, profile.Plugins.Balance.Enabled...) {
+				pc := getPluginConfig(plugin, profile.PluginConfigs)
+				if pc == nil {
+					klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", plugin)
+					continue
+				}
+				registryPlugin, ok := pluginbuilder.PluginRegistry[plugin]
+				pgFnc := registryPlugin.PluginBuilder
+				if !ok {
+					klog.ErrorS(fmt.Errorf("unable to find plugin in the pluginsMap"), "skipping plugin", "plugin", plugin)
+				}
+				pg, err := pgFnc(pc.Args, handle)
+				if err != nil {
+					klog.ErrorS(err, "unable to initialize a plugin", "pluginName", plugin)
+				}
+				if pg != nil {
+					switch v := pg.(type) {
+					case framework.DeschedulePlugin:
+						enabledDeschedulePlugins = append(enabledDeschedulePlugins, v)
+					case framework.BalancePlugin:
+						enabledBalancePlugins = append(enabledBalancePlugins, v)
+					default:
+						klog.ErrorS(fmt.Errorf("unknown plugin extension point"), "skipping plugin", "plugin", plugin)
+					}
+				}
 			}
 		}

+		// Execute extension points
+		for _, pg := range enabledDeschedulePlugins {
+			// TODO: strategyName should be accessible from within the strategy using a framework
+			// handle or function which the Evictor has access to. For migration/in-progress framework
+			// work, we are currently passing this via context. To be removed
+			// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
+			childCtx := context.WithValue(ctx, "strategyName", pg.Name())
+			status := pg.Deschedule(childCtx, nodes)
+			if status != nil && status.Err != nil {
+				klog.ErrorS(status.Err, "plugin finished with error", "pluginName", pg.Name())
+			}
+		}
+
+		for _, pg := range enabledBalancePlugins {
+			// TODO: strategyName should be accessible from within the strategy using a framework
+			// handle or function which the Evictor has access to. For migration/in-progress framework
+			// work, we are currently passing this via context. To be removed
+			// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
+			childCtx := context.WithValue(ctx, "strategyName", pg.Name())
+			status := pg.Balance(childCtx, nodes)
+			if status != nil && status.Err != nil {
+				klog.ErrorS(status.Err, "plugin finished with error", "pluginName", pg.Name())
+			}
+		}
+
+		klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
+
 		// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
 		if rs.DeschedulingInterval.Seconds() == 0 {
-			close(stopChannel)
+			cancel()
 		}
-	}, rs.DeschedulingInterval, stopChannel)
+	}, rs.DeschedulingInterval, ctx.Done())

 	return nil
 }

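`wait.NonSlidingUntil` ticks on a fixed period regardless of how long each descheduling pass takes, and the `cancel()` call doubles as the run-once escape hatch when no interval is configured. A small runnable sketch of that loop shape:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	interval := 0 * time.Second // "no interval configured"
	iterations := 0

	wait.NonSlidingUntil(func() {
		iterations++
		fmt.Println("descheduling pass", iterations)
		// With no interval, cancel after one pass instead of looping forever.
		if interval.Seconds() == 0 {
			cancel()
		}
	}, interval, ctx.Done())

	fmt.Println("done after", iterations, "pass(es)")
}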
+func getPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) *api.PluginConfig {
+	for _, pluginConfig := range pluginConfigs {
+		if pluginConfig.Name == pluginName {
+			return &pluginConfig
+		}
+	}
+	return nil
+}
+
+func createClients(kubeconfig string) (clientset.Interface, clientset.Interface, error) {
+	kClient, err := client.CreateClient(kubeconfig, "descheduler")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	eventClient, err := client.CreateClient(kubeconfig, "")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return kClient, eventClient, nil
+}

@@ -3,20 +3,27 @@ package descheduler
 import (
 	"context"
 	"fmt"
-	"strings"
 	"testing"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	policy "k8s.io/api/policy/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/runtime"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
+	core "k8s.io/client-go/testing"
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/api"
+	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
+	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
 	"sigs.k8s.io/descheduler/test"
 )

 func TestTaintsUpdated(t *testing.T) {
+	pluginbuilder.PluginRegistry = pluginbuilder.NewRegistry()
+	pluginbuilder.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, pluginbuilder.PluginRegistry)
 	ctx := context.Background()
 	n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
 	n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
@@ -27,29 +34,22 @@ func TestTaintsUpdated(t *testing.T) {
 	}

 	client := fakeclientset.NewSimpleClientset(n1, n2, p1)
-	dp := &api.DeschedulerPolicy{
-		Strategies: api.StrategyList{
-			"RemovePodsViolatingNodeTaints": api.DeschedulerStrategy{
+	eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
+	dp := &v1alpha1.DeschedulerPolicy{
+		Strategies: v1alpha1.StrategyList{
+			"RemovePodsViolatingNodeTaints": v1alpha1.DeschedulerStrategy{
 				Enabled: true,
 			},
 		},
 	}

-	stopChannel := make(chan struct{})
-	defer close(stopChannel)
-
-	rs := options.NewDeschedulerServer()
+	rs, err := options.NewDeschedulerServer()
+	if err != nil {
+		t.Fatalf("Unable to initialize server: %v", err)
+	}
 	rs.Client = client
-	rs.DeschedulingInterval = 100 * time.Millisecond
-	go func() {
-		err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1", stopChannel)
-		if err != nil {
-			t.Fatalf("Unable to run descheduler strategies: %v", err)
-		}
-	}()
-
-	// Wait for few cycles and then verify the only pod still exists
-	time.Sleep(300 * time.Millisecond)
+	rs.EventClient = eventClient
 	pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
 	if err != nil {
 		t.Errorf("Unable to list pods: %v", err)
@@ -71,24 +71,165 @@ func TestTaintsUpdated(t *testing.T) {
 		t.Fatalf("Unable to update node: %v\n", err)
 	}

-	if err := wait.PollImmediate(100*time.Millisecond, time.Second, func() (bool, error) {
-		// Get over evicted pod result in panic
-		//pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
-		// List is better, it does not panic.
-		// Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
-		pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
-		if err == nil {
-			if len(pods.Items) > 0 {
-				return false, nil
-			}
-			return true, nil
-		}
-		if strings.Contains(err.Error(), "can't assign or convert v1beta1.Eviction into v1.Pod") {
-			return true, nil
-		}
-		return false, nil
-	}); err != nil {
-		t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies")
+	var evictedPods []string
+	client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
+
+	internalDeschedulerPolicy, err := V1alpha1ToInternal(client, dp, pluginbuilder.PluginRegistry)
+	if err != nil {
+		t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
+	}
+
+	if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
+		t.Fatalf("Unable to run descheduler strategies: %v", err)
+	}
+
+	if len(evictedPods) != 1 {
+		t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
 	}
 }
+
+func TestDuplicate(t *testing.T) {
+	pluginbuilder.PluginRegistry = pluginbuilder.NewRegistry()
+	pluginbuilder.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicatesArgs{}, pluginbuilder.PluginRegistry)
+	ctx := context.Background()
+	node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
+	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
+
+	p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
+	p1.Namespace = "dev"
+	p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
+	p2.Namespace = "dev"
+	p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
+	p3.Namespace = "dev"
+
+	ownerRef1 := test.GetReplicaSetOwnerRefList()
+	p1.ObjectMeta.OwnerReferences = ownerRef1
+	p2.ObjectMeta.OwnerReferences = ownerRef1
+	p3.ObjectMeta.OwnerReferences = ownerRef1
+
+	client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
+	eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
+	dp := &v1alpha1.DeschedulerPolicy{
+		Strategies: v1alpha1.StrategyList{
+			"RemoveDuplicates": v1alpha1.DeschedulerStrategy{
+				Enabled: true,
+			},
+		},
+	}
+
+	rs, err := options.NewDeschedulerServer()
+	if err != nil {
+		t.Fatalf("Unable to initialize server: %v", err)
+	}
+	rs.Client = client
+	rs.EventClient = eventClient
+
+	pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		t.Errorf("Unable to list pods: %v", err)
+	}
+
+	if len(pods.Items) != 3 {
+		t.Errorf("Pods number should be 3 before evict")
+	}
+
+	var evictedPods []string
+	client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
+
+	internalDeschedulerPolicy, err := V1alpha1ToInternal(client, dp, pluginbuilder.PluginRegistry)
+	if err != nil {
+		t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
+	}
+	if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
+		t.Fatalf("Unable to run descheduler strategies: %v", err)
+	}
+
+	if len(evictedPods) == 0 {
+		t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
+	}
+}
+
+func TestRootCancel(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
+	n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
+	client := fakeclientset.NewSimpleClientset(n1, n2)
+	eventClient := fakeclientset.NewSimpleClientset(n1, n2)
+	dp := &api.DeschedulerPolicy{
+		Profiles: []api.Profile{}, // no strategies needed for this test
+	}
+
+	rs, err := options.NewDeschedulerServer()
+	if err != nil {
+		t.Fatalf("Unable to initialize server: %v", err)
+	}
+	rs.Client = client
+	rs.EventClient = eventClient
+	rs.DeschedulingInterval = 100 * time.Millisecond
+	errChan := make(chan error, 1)
+	defer close(errChan)
+
+	go func() {
+		err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
+		errChan <- err
+	}()
+	cancel()
+	select {
+	case err := <-errChan:
+		if err != nil {
+			t.Fatalf("Unable to run descheduler strategies: %v", err)
+		}
+	case <-time.After(1 * time.Second):
+		t.Fatal("Root ctx should have canceled immediately")
+	}
+}
+
+func TestRootCancelWithNoInterval(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
+	n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
+	client := fakeclientset.NewSimpleClientset(n1, n2)
+	eventClient := fakeclientset.NewSimpleClientset(n1, n2)
+	dp := &api.DeschedulerPolicy{
+		Profiles: []api.Profile{}, // no strategies needed for this test
+	}
+
+	rs, err := options.NewDeschedulerServer()
+	if err != nil {
+		t.Fatalf("Unable to initialize server: %v", err)
+	}
+	rs.Client = client
+	rs.EventClient = eventClient
+	rs.DeschedulingInterval = 0
+	errChan := make(chan error, 1)
+	defer close(errChan)
+
+	go func() {
+		err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
+		errChan <- err
+	}()
+	cancel()
+	select {
+	case err := <-errChan:
+		if err != nil {
+			t.Fatalf("Unable to run descheduler strategies: %v", err)
+		}
+	case <-time.After(1 * time.Second):
+		t.Fatal("Root ctx should have canceled immediately")
+	}
+}
+
+func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
+	return func(action core.Action) (bool, runtime.Object, error) {
+		if action.GetSubresource() == "eviction" {
+			createAct, matched := action.(core.CreateActionImpl)
+			if !matched {
+				return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
+			}
+			if eviction, matched := createAct.Object.(*policy.Eviction); matched {
+				*evictedPods = append(*evictedPods, eviction.GetName())
+			}
+		}
+		return false, nil, nil // fallback to the default reactor
+	}
+}

@@ -19,146 +19,160 @@ package evictions
 import (
 	"context"
 	"fmt"
-	"strings"

 	v1 "k8s.io/api/core/v1"
-	policy "k8s.io/api/policy/v1beta1"
+	policy "k8s.io/api/policy/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/kubernetes/scheme"
-	clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/client-go/tools/record"
+	"k8s.io/client-go/tools/events"
 	"k8s.io/klog/v2"
-	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
+	"sigs.k8s.io/descheduler/metrics"

 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 )

-const (
-	evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
+// nodePodEvictedCount keeps count of pods evicted on node
+type (
+	nodePodEvictedCount    map[string]uint
+	namespacePodEvictCount map[string]uint
 )

-// nodePodEvictedCount keeps count of pods evicted on node
-type nodePodEvictedCount map[*v1.Node]int
-
 type PodEvictor struct {
-	client                clientset.Interface
-	policyGroupVersion    string
-	dryRun                bool
-	maxPodsToEvictPerNode int
-	nodepodCount          nodePodEvictedCount
-	evictLocalStoragePods bool
+	client                     clientset.Interface
+	nodes                      []*v1.Node
+	policyGroupVersion         string
+	dryRun                     bool
+	maxPodsToEvictPerNode      *uint
+	maxPodsToEvictPerNamespace *uint
+	nodepodCount               nodePodEvictedCount
+	namespacePodCount          namespacePodEvictCount
+	metricsEnabled             bool
+	eventRecorder              events.EventRecorder
 }

 func NewPodEvictor(
 	client clientset.Interface,
 	policyGroupVersion string,
 	dryRun bool,
-	maxPodsToEvictPerNode int,
+	maxPodsToEvictPerNode *uint,
+	maxPodsToEvictPerNamespace *uint,
 	nodes []*v1.Node,
-	evictLocalStoragePods bool,
+	metricsEnabled bool,
+	eventRecorder events.EventRecorder,
 ) *PodEvictor {
-	var nodePodCount = make(nodePodEvictedCount)
+	nodePodCount := make(nodePodEvictedCount)
+	namespacePodCount := make(namespacePodEvictCount)
 	for _, node := range nodes {
 		// Initialize podsEvicted till now with 0.
-		nodePodCount[node] = 0
+		nodePodCount[node.Name] = 0
 	}

 	return &PodEvictor{
-		client:                client,
-		policyGroupVersion:    policyGroupVersion,
-		dryRun:                dryRun,
-		maxPodsToEvictPerNode: maxPodsToEvictPerNode,
-		nodepodCount:          nodePodCount,
-		evictLocalStoragePods: evictLocalStoragePods,
+		client:                     client,
+		nodes:                      nodes,
+		policyGroupVersion:         policyGroupVersion,
+		dryRun:                     dryRun,
+		maxPodsToEvictPerNode:      maxPodsToEvictPerNode,
+		maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
+		nodepodCount:               nodePodCount,
+		namespacePodCount:          namespacePodCount,
+		metricsEnabled:             metricsEnabled,
+		eventRecorder:              eventRecorder,
 	}
 }

-// IsEvictable checks if a pod is evictable or not.
-func (pe *PodEvictor) IsEvictable(pod *v1.Pod) bool {
-	checkErrs := []error{}
-	if IsCriticalPod(pod) {
-		checkErrs = append(checkErrs, fmt.Errorf("pod is critical"))
-	}
-
-	ownerRefList := podutil.OwnerRef(pod)
-	if IsDaemonsetPod(ownerRefList) {
-		checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
-	}
-
-	if len(ownerRefList) == 0 {
-		checkErrs = append(checkErrs, fmt.Errorf("pod does not have any ownerrefs"))
-	}
-
-	if !pe.evictLocalStoragePods && IsPodWithLocalStorage(pod) {
-		checkErrs = append(checkErrs, fmt.Errorf("pod has local storage and descheduler is not configured with --evict-local-storage-pods"))
-	}
-
-	if IsMirrorPod(pod) {
-		checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
-	}
-
-	if len(checkErrs) > 0 && !HaveEvictAnnotation(pod) {
-		klog.V(4).Infof("Pod %s in namespace %s is not evictable: Pod lacks an eviction annotation and fails the following checks: %v", pod.Name, pod.Namespace, errors.NewAggregate(checkErrs).Error())
-		return false
-	}
-	return true
-}
-
 // NodeEvicted gives a number of pods evicted for node
-func (pe *PodEvictor) NodeEvicted(node *v1.Node) int {
-	return pe.nodepodCount[node]
+func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
+	return pe.nodepodCount[node.Name]
 }

 // TotalEvicted gives a number of pods evicted through all nodes
-func (pe *PodEvictor) TotalEvicted() int {
-	var total int
+func (pe *PodEvictor) TotalEvicted() uint {
+	var total uint
 	for _, count := range pe.nodepodCount {
 		total += count
 	}
 	return total
 }

-// EvictPod returns non-nil error only when evicting a pod on a node is not
-// possible (due to maxPodsToEvictPerNode constraint). Success is true when the pod
-// is evicted on the server side.
-func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, reasons ...string) (bool, error) {
-	var reason string
-	if len(reasons) > 0 {
-		reason = " (" + strings.Join(reasons, ", ") + ")"
-	}
-	if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode {
-		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name)
-	}
-
-	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
-	if err != nil {
-		// err is used only for logging purposes
-		klog.Errorf("Error evicting pod: %#v in namespace %#v%s: %#v", pod.Name, pod.Namespace, reason, err)
-		return false, nil
-	}
-
-	pe.nodepodCount[node]++
-	if pe.dryRun {
-		klog.V(1).Infof("Evicted pod in dry run mode: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
-	} else {
-		klog.V(1).Infof("Evicted pod: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
-		eventBroadcaster := record.NewBroadcaster()
-		eventBroadcaster.StartLogging(klog.V(3).Infof)
-		eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
-		r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
-		r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
-	}
-	return true, nil
+// NodeLimitExceeded checks if the number of evictions for a node was exceeded
+func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
+	if pe.maxPodsToEvictPerNode != nil {
+		return pe.nodepodCount[node.Name] == *pe.maxPodsToEvictPerNode
+	}
+	return false
 }

-func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) error {
-	if dryRun {
-		return nil
-	}
+// EvictOptions provides a handle for passing additional info to EvictPod
+type EvictOptions struct {
+	// Reason allows for passing details about the specific eviction for logging.
+	Reason string
+}
+
+// EvictPod evicts a pod while exercising eviction limits.
+// Returns true when the pod is evicted on the server side.
+func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
+	// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
+	strategy := ""
+	if ctx.Value("strategyName") != nil {
+		strategy = ctx.Value("strategyName").(string)
+	}
+
+	if pod.Spec.NodeName != "" {
+		if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
+			if pe.metricsEnabled {
+				metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
+			}
+			klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per node reached"), "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
+			return false
+		}
+	}
+
+	if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
+		if pe.metricsEnabled {
+			metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
+		}
+		klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per namespace reached"), "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
+		return false
+	}
+
+	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
+	if err != nil {
+		// err is used only for logging purposes
+		klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
+		if pe.metricsEnabled {
+			metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
+		}
+		return false
+	}
+
+	if pod.Spec.NodeName != "" {
+		pe.nodepodCount[pod.Spec.NodeName]++
+	}
+	pe.namespacePodCount[pod.Namespace]++
+
+	if pe.metricsEnabled {
+		metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
+	}
+
+	if pe.dryRun {
+		klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
+	} else {
+		klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
+		reason := opts.Reason
+		if len(reason) == 0 {
+			reason = strategy
+			if len(reason) == 0 {
+				reason = "NotSet"
+			}
+		}
+		pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
+	}
+	return true
+}
+
+func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
 	deleteOptions := &metav1.DeleteOptions{}
 	// GracePeriodSeconds ?
 	eviction := &policy.Eviction{
@@ -172,7 +186,7 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
 		},
 		DeleteOptions: deleteOptions,
 	}
-	err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction)
+	err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)

 	if apierrors.IsTooManyRequests(err) {
 		return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
@@ -182,37 +196,3 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
 	}
 	return err
 }
-
-func IsCriticalPod(pod *v1.Pod) bool {
-	return utils.IsCriticalPod(pod)
-}
-
-func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
-	for _, ownerRef := range ownerRefList {
-		if ownerRef.Kind == "DaemonSet" {
-			return true
-		}
-	}
-	return false
-}
-
-// IsMirrorPod checks whether the pod is a mirror pod.
-func IsMirrorPod(pod *v1.Pod) bool {
-	return utils.IsMirrorPod(pod)
-}
-
-// HaveEvictAnnotation checks if the pod have evict annotation
-func HaveEvictAnnotation(pod *v1.Pod) bool {
-	_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
-	return found
-}
-
-func IsPodWithLocalStorage(pod *v1.Pod) bool {
-	for _, volume := range pod.Spec.Volumes {
-		if volume.HostPath != nil || volume.EmptyDir != nil {
-			return true
-		}
-	}
-	return false
-}

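The reworked `PodEvictor` keys its counters by node name and namespace and treats a nil limit as unlimited, so callers opt into caps by passing pointers. A standalone sketch of just that counting logic (a stand-in, not the descheduler package itself):

package main

import "fmt"

// Minimal stand-in for the PodEvictor limit checks above:
// nil means unlimited, counters are keyed by plain strings.
type evictor struct {
	maxPerNode      *uint
	maxPerNamespace *uint
	nodeCount       map[string]uint
	nsCount         map[string]uint
}

func (e *evictor) tryEvict(node, namespace string) bool {
	if e.maxPerNode != nil && e.nodeCount[node]+1 > *e.maxPerNode {
		return false // per-node cap reached
	}
	if e.maxPerNamespace != nil && e.nsCount[namespace]+1 > *e.maxPerNamespace {
		return false // per-namespace cap reached
	}
	e.nodeCount[node]++
	e.nsCount[namespace]++
	return true
}

func main() {
	limit := uint(1)
	e := &evictor{maxPerNode: &limit, nodeCount: map[string]uint{}, nsCount: map[string]uint{}}
	fmt.Println(e.tryEvict("n1", "dev")) // true
	fmt.Println(e.tryEvict("n1", "dev")) // false: per-node limit of 1 reached
	fmt.Println(e.tryEvict("n2", "dev")) // true: namespace limit is nil (unlimited)
}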
@@ -62,169 +62,13 @@ func TestEvictPod(t *testing.T) {
|
|||||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||||
return true, &v1.PodList{Items: test.pods}, nil
|
return true, &v1.PodList{Items: test.pods}, nil
|
||||||
})
|
})
|
||||||
got := evictPod(ctx, fakeClient, test.pod, "v1", false)
|
got := evictPod(ctx, fakeClient, test.pod, "v1")
|
||||||
if got != test.want {
|
if got != test.want {
|
||||||
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
|
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsEvictable(t *testing.T) {
|
|
||||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
|
||||||
type testCase struct {
|
|
||||||
pod *v1.Pod
|
|
||||||
runBefore func(*v1.Pod)
|
|
||||||
evictLocalStoragePods bool
|
|
||||||
result bool
|
|
||||||
}
|
|
||||||
|
|
||||||
testCases := []testCase{
|
|
||||||
{
|
|
||||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
|
||||||
pod.Spec.Volumes = []v1.Volume{
|
|
||||||
{
|
|
||||||
Name: "sample",
|
|
||||||
VolumeSource: v1.VolumeSource{
|
|
||||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
|
||||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
|
||||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: false,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
|
||||||
pod.Spec.Volumes = []v1.Volume{
|
|
||||||
{
|
|
||||||
Name: "sample",
|
|
||||||
VolumeSource: v1.VolumeSource{
|
|
||||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
|
||||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
|
||||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: true,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
|
||||||
pod.Spec.Volumes = []v1.Volume{
|
|
||||||
{
|
|
||||||
Name: "sample",
|
|
||||||
VolumeSource: v1.VolumeSource{
|
|
||||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
|
||||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
|
||||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: false,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
|
||||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: false,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
|
||||||
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: true,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
priority := utils.SystemCriticalPriority
|
|
||||||
pod.Spec.Priority = &priority
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: false,
|
|
||||||
}, {
|
|
||||||
pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
|
|
||||||
runBefore: func(pod *v1.Pod) {
|
|
||||||
priority := utils.SystemCriticalPriority
|
|
||||||
pod.Spec.Priority = &priority
|
|
||||||
pod.Annotations = map[string]string{
|
|
||||||
"descheduler.alpha.kubernetes.io/evict": "true",
|
|
||||||
}
|
|
||||||
},
|
|
||||||
evictLocalStoragePods: false,
|
|
||||||
result: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range testCases {
|
|
||||||
test.runBefore(test.pod)
|
|
||||||
podEvictor := &PodEvictor{
|
|
||||||
evictLocalStoragePods: test.evictLocalStoragePods,
|
|
||||||
}
|
|
||||||
result := podEvictor.IsEvictable(test.pod)
|
|
||||||
if result != test.result {
|
|
||||||
t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pod.Name, test.result, result)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func TestPodTypes(t *testing.T) {
|
func TestPodTypes(t *testing.T) {
|
||||||
n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
|
n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
|
||||||
 	p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
@@ -233,15 +77,11 @@ func TestPodTypes(t *testing.T) {
 	p2 := test.BuildTestPod("p2", 400, 0, n1.Name, nil)
 	p3 := test.BuildTestPod("p3", 400, 0, n1.Name, nil)
 	p4 := test.BuildTestPod("p4", 400, 0, n1.Name, nil)
-	p5 := test.BuildTestPod("p5", 400, 0, n1.Name, nil)
-	p6 := test.BuildTestPod("p6", 400, 0, n1.Name, nil)
-
-	p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
-
 	p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
 	// The following 4 pods won't get evicted.
 	// A daemonset.
-	//p2.Annotations = test.GetDaemonSetAnnotation()
+	// p2.Annotations = test.GetDaemonSetAnnotation()
 	p2.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
 	// A pod with local storage.
 	p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -251,34 +91,25 @@ func TestPodTypes(t *testing.T) {
 			VolumeSource: v1.VolumeSource{
 				HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 				EmptyDir: &v1.EmptyDirVolumeSource{
-					SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+					SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+				},
 			},
 		},
 	}
 	// A Mirror Pod.
 	p4.Annotations = test.GetMirrorPodAnnotation()
-	// A Critical Pod.
-	p5.Namespace = "kube-system"
-	priority := utils.SystemCriticalPriority
-	p5.Spec.Priority = &priority
-	systemCriticalPriority := utils.SystemCriticalPriority
-	p5.Spec.Priority = &systemCriticalPriority
-	if !IsMirrorPod(p4) {
+	if !utils.IsMirrorPod(p4) {
 		t.Errorf("Expected p4 to be a mirror pod.")
 	}
-	if !IsCriticalPod(p5) {
-		t.Errorf("Expected p5 to be a critical pod.")
-	}
-	if !IsPodWithLocalStorage(p3) {
+	if !utils.IsPodWithLocalStorage(p3) {
 		t.Errorf("Expected p3 to be a pod with local storage.")
 	}
 	ownerRefList := podutil.OwnerRef(p2)
-	if !IsDaemonsetPod(ownerRefList) {
+	if !utils.IsDaemonsetPod(ownerRefList) {
 		t.Errorf("Expected p2 to be a daemonset pod.")
 	}
 	ownerRefList = podutil.OwnerRef(p1)
-	if IsDaemonsetPod(ownerRefList) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
+	if utils.IsDaemonsetPod(ownerRefList) || utils.IsPodWithLocalStorage(p1) || utils.IsCriticalPriorityPod(p1) || utils.IsMirrorPod(p1) || utils.IsStaticPod(p1) {
 		t.Errorf("Expected p1 to be a normal pod.")
 	}
 }
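The assertions above now route through the shared helpers in sigs.k8s.io/descheduler/pkg/utils instead of package-local copies. As a rough sketch of how those helpers compose outside a test, assuming a populated *v1.Pod (the classify helper and package name are hypothetical; only the utils.* and podutil.OwnerRef calls appear in the diff above):

package sketch

import (
	v1 "k8s.io/api/core/v1"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/utils"
)

// classify reports which eviction-relevant categories a pod falls into.
func classify(pod *v1.Pod) []string {
	var kinds []string
	if utils.IsMirrorPod(pod) {
		kinds = append(kinds, "mirror")
	}
	if utils.IsPodWithLocalStorage(pod) {
		kinds = append(kinds, "local-storage")
	}
	if utils.IsDaemonsetPod(podutil.OwnerRef(pod)) {
		kinds = append(kinds, "daemonset")
	}
	return kinds
}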
pkg/descheduler/leaderelection.go (new file, 100 lines)
@@ -0,0 +1,100 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package descheduler
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"k8s.io/apimachinery/pkg/util/uuid"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/leaderelection"
+	"k8s.io/client-go/tools/leaderelection/resourcelock"
+	componentbaseconfig "k8s.io/component-base/config"
+	"k8s.io/klog/v2"
+)
+
+// NewLeaderElection starts the leader election code loop
+func NewLeaderElection(
+	run func() error,
+	client clientset.Interface,
+	LeaderElectionConfig *componentbaseconfig.LeaderElectionConfiguration,
+	ctx context.Context,
+) error {
+	var id string
+
+	if hostname, err := os.Hostname(); err != nil {
+		// on errors, make sure we're unique
+		id = string(uuid.NewUUID())
+	} else {
+		// add a uniquifier so that two processes on the same host don't accidentally both become active
+		id = hostname + "_" + string(uuid.NewUUID())
+	}
+
+	klog.V(3).Infof("Assigned unique lease holder id: %s", id)
+
+	if len(LeaderElectionConfig.ResourceNamespace) == 0 {
+		return fmt.Errorf("namespace may not be empty")
+	}
+
+	if len(LeaderElectionConfig.ResourceName) == 0 {
+		return fmt.Errorf("name may not be empty")
+	}
+
+	lock, err := resourcelock.New(
+		LeaderElectionConfig.ResourceLock,
+		LeaderElectionConfig.ResourceNamespace,
+		LeaderElectionConfig.ResourceName,
+		client.CoreV1(),
+		client.CoordinationV1(),
+		resourcelock.ResourceLockConfig{
+			Identity: id,
+		},
+	)
+	if err != nil {
+		return fmt.Errorf("unable to create leader election lock: %v", err)
+	}
+
+	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
+		Lock:            lock,
+		ReleaseOnCancel: true,
+		LeaseDuration:   LeaderElectionConfig.LeaseDuration.Duration,
+		RenewDeadline:   LeaderElectionConfig.RenewDeadline.Duration,
+		RetryPeriod:     LeaderElectionConfig.RetryPeriod.Duration,
+		Callbacks: leaderelection.LeaderCallbacks{
+			OnStartedLeading: func(ctx context.Context) {
+				klog.V(1).InfoS("Started leading")
+				err := run()
+				if err != nil {
+					klog.Error(err)
+				}
+			},
+			OnStoppedLeading: func() {
+				klog.V(1).InfoS("Leader lost")
+			},
+			OnNewLeader: func(identity string) {
+				// Just got the lock
+				if identity == id {
+					return
+				}
+				klog.V(1).Infof("New leader elected: %v", identity)
+			},
+		},
+	})
+	return nil
+}
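NewLeaderElection blocks inside leaderelection.RunOrDie until the context is cancelled, and the run callback only executes while the lease is held. A minimal caller sketch, assuming the package is imported from this repository and a runDescheduler closure exists (everything below is illustrative, not part of the commit):

package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	componentbaseconfig "k8s.io/component-base/config"

	"sigs.k8s.io/descheduler/pkg/descheduler"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	lec := &componentbaseconfig.LeaderElectionConfiguration{
		ResourceLock:      "leases",
		ResourceName:      "descheduler",
		ResourceNamespace: "kube-system",
		LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
		RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
		RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
	}

	runDescheduler := func() error {
		// one descheduling pass; elided
		return nil
	}

	// Blocks until the context is cancelled; runDescheduler runs only while leading.
	if err := descheduler.NewLeaderElection(runDescheduler, client, lec, context.Background()); err != nil {
		panic(err)
	}
}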
@@ -18,18 +18,22 @@ package node
 
 import (
 	"context"
-	"k8s.io/api/core/v1"
+	"fmt"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
+	listersv1 "k8s.io/client-go/listers/core/v1"
 	"k8s.io/klog/v2"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 // ReadyNodes returns ready nodes irrespective of whether they are
 // schedulable or not.
-func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
+func ReadyNodes(ctx context.Context, client clientset.Interface, nodeLister listersv1.NodeLister, nodeSelector string) ([]*v1.Node, error) {
 	ns, err := labels.Parse(nodeSelector)
 	if err != nil {
 		return []*v1.Node{}, err
@@ -37,12 +41,12 @@ func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer co
 
 	var nodes []*v1.Node
 	// err is defined above
-	if nodes, err = nodeInformer.Lister().List(ns); err != nil {
+	if nodes, err = nodeLister.List(ns); err != nil {
 		return []*v1.Node{}, err
 	}
 
 	if len(nodes) == 0 {
-		klog.V(2).Infof("node lister returned empty list, now fetch directly")
+		klog.V(2).InfoS("Node lister returned empty list, now fetch directly")
 
 		nItems, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: nodeSelector})
 		if err != nil {
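ReadyNodes now takes a NodeLister instead of a NodeInformer, so informer startup and cache sync become the caller's job. A minimal wiring sketch under that assumption (client is a pre-built kubernetes.Interface; names are illustrative):

// Build the lister from a shared informer factory, sync it, then query.
factory := informers.NewSharedInformerFactory(client, 0)
nodeLister := factory.Core().V1().Nodes().Lister()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

factory.Start(ctx.Done())            // begin watching the API server
factory.WaitForCacheSync(ctx.Done()) // ReadyNodes reads this cache first

// Falls back to a direct List call if the cache comes back empty.
readyNodes, err := ReadyNodes(ctx, client, nodeLister, "type=compute")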
@@ -77,63 +81,210 @@ func IsReady(node *v1.Node) bool {
 		// - NodeOutOfDisk condition status is ConditionFalse,
 		// - NodeNetworkUnavailable condition status is ConditionFalse.
 		if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
-			klog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+			klog.V(1).InfoS("Ignoring node", "node", klog.KObj(node), "condition", cond.Type, "status", cond.Status)
 			return false
 		} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
-			klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+			klog.V(4).InfoS("Ignoring node with condition status", "node", klog.KObj(node.Name), "condition", cond.Type, "status", cond.Status)
 			return false
 		} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
-			klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+			klog.V(4).InfoS("Ignoring node with condition status", "node", klog.KObj(node.Name), "condition", cond.Type, "status", cond.Status)
 			return false
 		}*/
 	}
 	// Ignore nodes that are marked unschedulable
 	/*if node.Spec.Unschedulable {
-		klog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)
+		klog.V(4).InfoS("Ignoring node since it is unschedulable", "node", klog.KObj(node.Name))
 		return false
 	}*/
 	return true
 }
 
-// IsNodeUnschedulable checks if the node is unschedulable. This is a helper function to check only in case of
-// underutilized node so that they won't be accounted for.
-func IsNodeUnschedulable(node *v1.Node) bool {
-	return node.Spec.Unschedulable
+// NodeFit returns true if the provided pod can be scheduled onto the provided node.
+// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
+// This function currently considers a subset of the Kubernetes Scheduler's predicates when
+// deciding if a pod would fit on a node, but more predicates may be added in the future.
+func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) []error {
+	// Check node selector and required affinity
+	var errors []error
+	if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
+		errors = append(errors, err)
+	} else if !ok {
+		errors = append(errors, fmt.Errorf("pod node selector does not match the node label"))
+	}
+	// Check taints (we only care about NoSchedule and NoExecute taints)
+	ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
+		return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
+	})
+	if !ok {
+		errors = append(errors, fmt.Errorf("pod does not tolerate taints on the node"))
+	}
+	// Check if the pod can fit on a node based off its requests
+	if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
+		ok, reqErrors := fitsRequest(nodeIndexer, pod, node)
+		if !ok {
+			errors = append(errors, reqErrors...)
+		}
+	}
+	// Check if node is schedulable
+	if IsNodeUnschedulable(node) {
+		errors = append(errors, fmt.Errorf("node is not schedulable"))
+	}
+
+	return errors
 }
 
-// PodFitsAnyNode checks if the given pod fits any of the given nodes, based on
-// multiple criteria, like, pod node selector matching the node label, node
-// being schedulable or not.
-func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
+// PodFitsAnyOtherNode checks if the given pod will fit any of the given nodes, besides the node
+// the pod is already running on. The predicates used to determine if the pod will fit can be found in the NodeFit function.
+func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
 	for _, node := range nodes {
-		ok, err := utils.PodMatchNodeSelector(pod, node)
-		if err != nil || !ok {
+		// Skip node pod is already on
+		if node.Name == pod.Spec.NodeName {
 			continue
 		}
-		if !IsNodeUnschedulable(node) {
-			klog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
+
+		errors := NodeFit(nodeIndexer, pod, node)
+		if len(errors) == 0 {
+			klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
 			return true
+		} else {
+			klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
+			for _, err := range errors {
+				klog.V(4).InfoS(err.Error())
+			}
 		}
 	}
 	return false
 }
 
-// PodFitsCurrentNode checks if the given pod fits on the given node if the pod
-// node selector matches the node label.
-func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
-	ok, err := utils.PodMatchNodeSelector(pod, node)
-	if err != nil {
-		klog.Error(err)
-		return false
-	}
-
-	if !ok {
-		klog.V(2).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
-		return false
-	}
-
-	klog.V(2).Infof("Pod %v fits on node %v", pod.Name, node.Name)
-	return true
+// PodFitsAnyNode checks if the given pod will fit any of the given nodes. The predicates used
+// to determine if the pod will fit can be found in the NodeFit function.
+func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
+	for _, node := range nodes {
+		errors := NodeFit(nodeIndexer, pod, node)
+		if len(errors) == 0 {
+			klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
+			return true
+		} else {
+			klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
+			for _, err := range errors {
+				klog.V(4).InfoS(err.Error())
+			}
+		}
+	}
+	return false
+}
+
+// PodFitsCurrentNode checks if the given pod will fit onto the given node. The predicates used
+// to determine if the pod will fit can be found in the NodeFit function.
+func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) bool {
+	errors := NodeFit(nodeIndexer, pod, node)
+	if len(errors) == 0 {
+		klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
+		return true
+	} else {
+		klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
+		for _, err := range errors {
+			klog.V(4).InfoS(err.Error())
+		}
+	}
+	return false
+}
+
+// IsNodeUnschedulable checks if the node is unschedulable. This is a helper function to check only in case of
+// underutilized node so that they won't be accounted for.
+func IsNodeUnschedulable(node *v1.Node) bool {
+	return node.Spec.Unschedulable
+}
+
+// fitsRequest determines if a pod can fit on a node based on its resource requests. It returns true if
+// the pod will fit.
+func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, []error) {
+	var insufficientResources []error
+
+	// Get pod requests
+	podRequests, _ := utils.PodRequestsAndLimits(pod)
+	resourceNames := make([]v1.ResourceName, 0, len(podRequests))
+	for name := range podRequests {
+		resourceNames = append(resourceNames, name)
+	}
+
+	availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames)
+	if err != nil {
+		return false, []error{err}
+	}
+
+	podFitsOnNode := true
+	for _, resource := range resourceNames {
+		podResourceRequest := podRequests[resource]
+		availableResource, ok := availableResources[resource]
+		if !ok || podResourceRequest.MilliValue() > availableResource.MilliValue() {
+			insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", resource))
+			podFitsOnNode = false
+		}
+	}
+	return podFitsOnNode, insufficientResources
+}
+
+// nodeAvailableResources returns resources mapped to the quantity available on the node.
+func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName) (map[v1.ResourceName]*resource.Quantity, error) {
+	podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
+	if err != nil {
+		return nil, err
+	}
+	nodeUtilization := NodeUtilization(podsOnNode, resourceNames)
+	remainingResources := map[v1.ResourceName]*resource.Quantity{
+		v1.ResourceCPU:    resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
+		v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
+		v1.ResourcePods:   resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
+	}
+	for _, name := range resourceNames {
+		if !IsBasicResource(name) {
+			if _, exists := node.Status.Allocatable[name]; exists {
+				allocatableResource := node.Status.Allocatable[name]
+				remainingResources[name] = resource.NewQuantity(allocatableResource.Value()-nodeUtilization[name].Value(), resource.DecimalSI)
+			} else {
+				remainingResources[name] = resource.NewQuantity(0, resource.DecimalSI)
+			}
+		}
+	}
+
+	return remainingResources, nil
+}
+
+// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
+func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
+	totalReqs := map[v1.ResourceName]*resource.Quantity{
+		v1.ResourceCPU:    resource.NewMilliQuantity(0, resource.DecimalSI),
+		v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
+		v1.ResourcePods:   resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
+	}
+	for _, name := range resourceNames {
+		if !IsBasicResource(name) {
+			totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
+		}
+	}
+
+	for _, pod := range pods {
+		req, _ := utils.PodRequestsAndLimits(pod)
+		for _, name := range resourceNames {
+			quantity, ok := req[name]
+			if ok && name != v1.ResourcePods {
+				// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
+				// the format of the quantity will be updated to the format of y.
+				totalReqs[name].Add(quantity)
+			}
+		}
+	}
+
+	return totalReqs
+}
+
+// IsBasicResource checks if resource is basic native.
+func IsBasicResource(name v1.ResourceName) bool {
+	switch name {
+	case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
+		return true
+	default:
+		return false
+	}
 }
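NodeFit returns the full predicate-failure slice rather than a bare bool, which is what lets the PodFits* wrappers log every reason a node was rejected. A sketch of a custom consumer built on the same signatures (evictableElsewhere is a hypothetical helper, not part of the commit):

// evictableElsewhere reports whether pod could land on some other node,
// keeping the per-node predicate failures for debugging.
func evictableElsewhere(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) (bool, map[string][]error) {
	failures := map[string][]error{}
	for _, n := range nodes {
		if n.Name == pod.Spec.NodeName {
			continue // skip the node the pod already occupies
		}
		errs := NodeFit(nodeIndexer, pod, n)
		if len(errs) == 0 {
			return true, nil // first fitting node wins
		}
		failures[n.Name] = errs
	}
	return false, failures
}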
@@ -20,10 +20,13 @@ import (
 	"context"
 	"testing"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/test"
 )
 
@@ -53,7 +56,6 @@ func TestReadyNodes(t *testing.T) {
 	if IsReady(node5) {
 		t.Errorf("Expected %v to be not ready", node5.Name)
 	}
-
 }
 
 func TestReadyNodesWithNodeSelector(t *testing.T) {
@@ -67,14 +69,14 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
 	nodeSelector := "type=compute"
 
 	sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
+	nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
 
-	stopChannel := make(chan struct{}, 0)
+	stopChannel := make(chan struct{})
 	sharedInformerFactory.Start(stopChannel)
 	sharedInformerFactory.WaitForCacheSync(stopChannel)
 	defer close(stopChannel)
 
-	nodes, _ := ReadyNodes(ctx, fakeClient, nodeInformer, nodeSelector, nil)
+	nodes, _ := ReadyNodes(ctx, fakeClient, nodeLister, nodeSelector)
 
 	if nodes[0].Name != "node1" {
 		t.Errorf("Expected node1, got %s", nodes[0].Name)
@@ -108,11 +110,9 @@ func TestIsNodeUnschedulable(t *testing.T) {
 			t.Errorf("Test %#v failed", test.description)
 		}
 	}
-
 }
 
 func TestPodFitsCurrentNode(t *testing.T) {
-
 	nodeLabelKey := "kubernetes.io/desiredNode"
 	nodeLabelValue := "yes"
 
@@ -147,13 +147,13 @@ func TestPodFitsCurrentNode(t *testing.T) {
 					},
 				},
 			},
-			node: &v1.Node{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						nodeLabelKey: nodeLabelValue,
-					},
-				},
-			},
+			node: test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+				node.ObjectMeta.Labels = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+
+				node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+			}),
 			success: true,
 		},
 		{
@@ -181,217 +181,582 @@ func TestPodFitsCurrentNode(t *testing.T) {
 					},
 				},
 			},
-			node: &v1.Node{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						nodeLabelKey: "no",
-					},
-				},
-			},
+			node: test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+				node.ObjectMeta.Labels = map[string]string{
+					nodeLabelKey: "no",
+				}
+
+				node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+			}),
 			success: false,
 		},
 	}
 
 	for _, tc := range tests {
-		actual := PodFitsCurrentNode(tc.pod, tc.node)
-		if actual != tc.success {
-			t.Errorf("Test %#v failed", tc.description)
-		}
+		t.Run(tc.description, func(t *testing.T) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var objs []runtime.Object
+			objs = append(objs, tc.node)
+			objs = append(objs, tc.pod)
+
+			fakeClient := fake.NewSimpleClientset(objs...)
+
+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
+
+			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+			actual := PodFitsCurrentNode(getPodsAssignedToNode, tc.pod, tc.node)
+			if actual != tc.success {
+				t.Errorf("Test %#v failed", tc.description)
+			}
+		})
 	}
 }
 
-func TestPodFitsAnyNode(t *testing.T) {
-
+func TestPodFitsAnyOtherNode(t *testing.T) {
 	nodeLabelKey := "kubernetes.io/desiredNode"
 	nodeLabelValue := "yes"
+	nodeTaintKey := "hardware"
+	nodeTaintValue := "gpu"
+
+	// Staging node has no scheduling restrictions, but the pod always starts here and PodFitsAnyOtherNode() doesn't take into account the node the pod is running on.
+	nodeNames := []string{"node1", "node2", "stagingNode"}
+
 	tests := []struct {
 		description string
 		pod         *v1.Pod
 		nodes       []*v1.Node
 		success     bool
+		podsOnNodes []*v1.Pod
 	}{
 		{
-			description: "Pod expected to fit one of the nodes",
-			pod: &v1.Pod{
-				Spec: v1.PodSpec{
-					Affinity: &v1.Affinity{
-						NodeAffinity: &v1.NodeAffinity{
-							RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
-								NodeSelectorTerms: []v1.NodeSelectorTerm{
-									{
-										MatchExpressions: []v1.NodeSelectorRequirement{
-											{
-												Key:      nodeLabelKey,
-												Operator: "In",
-												Values: []string{
-													nodeLabelValue,
-												},
-											},
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
+			description: "Pod fits another node matching node affinity",
+			pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+			}),
 			nodes: []*v1.Node{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Labels: map[string]string{
-							nodeLabelKey: nodeLabelValue,
-						},
-					},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Labels: map[string]string{
-							nodeLabelKey: "no",
-						},
-					},
-				},
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: "no",
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
 			},
-			success: true,
+			podsOnNodes: []*v1.Pod{},
+			success:     true,
 		},
 		{
-			description: "Pod expected to fit one of the nodes (error node first)",
-			pod: &v1.Pod{
-				Spec: v1.PodSpec{
-					Affinity: &v1.Affinity{
-						NodeAffinity: &v1.NodeAffinity{
-							RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
-								NodeSelectorTerms: []v1.NodeSelectorTerm{
-									{
-										MatchExpressions: []v1.NodeSelectorRequirement{
-											{
-												Key:      nodeLabelKey,
-												Operator: "In",
-												Values: []string{
-													nodeLabelValue,
-												},
-											},
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
+			description: "Pod expected to fit one of the nodes",
+			pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+			}),
 			nodes: []*v1.Node{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Labels: map[string]string{
-							nodeLabelKey: "no",
-						},
-					},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Labels: map[string]string{
-							nodeLabelKey: nodeLabelValue,
-						},
-					},
-				},
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: "no",
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
 			},
-			success: true,
+			podsOnNodes: []*v1.Pod{},
+			success:     true,
 		},
 		{
 			description: "Pod expected to fit none of the nodes",
-			pod: &v1.Pod{
-				Spec: v1.PodSpec{
-					Affinity: &v1.Affinity{
-						NodeAffinity: &v1.NodeAffinity{
-							RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
-								NodeSelectorTerms: []v1.NodeSelectorTerm{
-									{
-										MatchExpressions: []v1.NodeSelectorRequirement{
-											{
-												Key:      nodeLabelKey,
-												Operator: "In",
-												Values: []string{
-													nodeLabelValue,
-												},
-											},
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
+			pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+			}),
 			nodes: []*v1.Node{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Labels: map[string]string{
-							nodeLabelKey: "unfit1",
-						},
-					},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Labels: map[string]string{
-							nodeLabelKey: "unfit2",
-						},
-					},
-				},
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: "unfit1",
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: "unfit2",
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
 			},
-			success: false,
+			podsOnNodes: []*v1.Pod{},
+			success:     false,
 		},
 		{
 			description: "Nodes are unschedulable but labels match, should fail",
-			pod: &v1.Pod{
-				Spec: v1.PodSpec{
-					Affinity: &v1.Affinity{
-						NodeAffinity: &v1.NodeAffinity{
-							RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
-								NodeSelectorTerms: []v1.NodeSelectorTerm{
-									{
-										MatchExpressions: []v1.NodeSelectorRequirement{
-											{
-												Key:      nodeLabelKey,
-												Operator: "In",
-												Values: []string{
-													nodeLabelValue,
-												},
-											},
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
+			pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+			}),
 			nodes: []*v1.Node{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Labels: map[string]string{
-							nodeLabelKey: nodeLabelValue,
-						},
-					},
-					Spec: v1.NodeSpec{
-						Unschedulable: true,
-					},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Labels: map[string]string{
-							nodeLabelKey: "no",
-						},
-					},
-				},
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+					node.Spec.Unschedulable = true
+				}),
+				test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: "no",
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
 			},
-			success: false,
+			podsOnNodes: []*v1.Pod{},
+			success:     false,
 		},
+		{
+			description: "Both nodes are tainted, should fail",
+			pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+				pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+			}),
+			nodes: []*v1.Node{
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+				test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
+			},
+			podsOnNodes: []*v1.Pod{},
+			success:     false,
+		},
+		{
+			description: "Two nodes matches node selector, one of them is tainted, there is a pod on the available node, and requests are low, should pass",
+			pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+				pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+			}),
+			nodes: []*v1.Node{
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+				test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
+			},
+			podsOnNodes: []*v1.Pod{
+				test.BuildTestPod("test-pod", 12*1000, 20*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
+					pod.ObjectMeta = metav1.ObjectMeta{
+						Namespace: "test",
+						Labels: map[string]string{
+							"test": "true",
+						},
+					}
+					pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(40*1000*1000*1000, resource.DecimalSI)
+				}),
+			},
+			success: true,
+		},
+		{
+			description: "Two nodes matches node selector, one of them is tainted, but CPU requests are too big, should fail",
+			pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+				pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+			}),
+			nodes: []*v1.Node{
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+				// Notice that this node only has 4 cores, the pod already on the node below requests 3 cores, and the pod above requests 2 cores
+				test.BuildTestNode(nodeNames[1], 4000, 8*1000*1000*1000, 12, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
+			},
+			podsOnNodes: []*v1.Pod{
+				test.BuildTestPod("3-core-pod", 3000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
+					pod.ObjectMeta = metav1.ObjectMeta{
+						Namespace: "test",
+						Labels: map[string]string{
+							"test": "true",
+						},
+					}
+					pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+				}),
+			},
+			success: false,
+		},
+		{
+			description: "Two nodes matches node selector, one of them is tainted, but memory requests are too big, should fail",
+			pod: test.BuildTestPod("p1", 2000, 5*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+				pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+			}),
+			nodes: []*v1.Node{
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+				// Notice that this node only has 8GB of memory, the pod already on the node below requests 4GB, and the pod above requests 5GB
+				test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
+			},
+			podsOnNodes: []*v1.Pod{
+				test.BuildTestPod("4GB-mem-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
+					pod.ObjectMeta = metav1.ObjectMeta{
+						Namespace: "test",
+						Labels: map[string]string{
+							"test": "true",
+						},
+					}
+					pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+				}),
+			},
+			success: false,
+		},
+		{
+			description: "Two nodes matches node selector, one of them is tainted, but ephemeral storage requests are too big, should fail",
+			pod: test.BuildTestPod("p1", 2000, 4*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+				pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+			}),
+			nodes: []*v1.Node{
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+				// Notice that this node only has 20GB of storage, the pod already on the node below requests 11GB, and the pod above requests 10GB
+				test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(20*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
+			},
+			podsOnNodes: []*v1.Pod{
+				test.BuildTestPod("11GB-storage-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
+					pod.ObjectMeta = metav1.ObjectMeta{
+						Namespace: "test",
+						Labels: map[string]string{
+							"test": "true",
+						},
+					}
+					pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(11*1000*1000*1000, resource.DecimalSI)
+				}),
+			},
+			success: false,
+		},
+		{
+			description: "Two nodes matches node selector, one of them is tainted, but custom resource requests are too big, should fail",
+			pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+				pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+				pod.Spec.Containers[0].Resources.Requests["example.com/custom-resource"] = *resource.NewQuantity(10, resource.DecimalSI)
+			}),
+			nodes: []*v1.Node{
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+					node.Status.Allocatable["example.com/custom-resource"] = *resource.NewQuantity(15, resource.DecimalSI)
+				}),
+				// Notice that this node only has 15 of the custom resource, the pod already on the node below requests 10, and the pod above requests 10
+				test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
+					node.Status.Allocatable["example.com/custom-resource"] = *resource.NewQuantity(15, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
+			},
+			podsOnNodes: []*v1.Pod{
+				test.BuildTestPod("10-custom-resource-pod", 0, 0, nodeNames[1], func(pod *v1.Pod) {
+					pod.ObjectMeta = metav1.ObjectMeta{
+						Namespace: "test",
+						Labels: map[string]string{
+							"test": "true",
+						},
+					}
+					pod.Spec.Containers[0].Resources.Requests["example.com/custom-resource"] = *resource.NewQuantity(10, resource.DecimalSI)
+				}),
+			},
+			success: false,
+		},
+		{
+			description: "Two nodes matches node selector, one of them is tainted, CPU requests will fit, and pod Overhead is low enough, should pass",
+			pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+				pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+			}),
+			nodes: []*v1.Node{
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+				// Notice that this node has 5 CPU cores, the pod below requests 2 cores, and has CPU overhead of 1 cores, and the pod above requests 1 core
+				test.BuildTestNode(nodeNames[1], 5000, 8*1000*1000*1000, 12, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
+			},
+			podsOnNodes: []*v1.Pod{
+				test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
+					pod.ObjectMeta = metav1.ObjectMeta{
+						Namespace: "test",
+						Labels: map[string]string{
+							"test": "true",
+						},
+					}
+					pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+					pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
+				}),
+			},
+			success: true,
+		},
+		{
+			description: "Two nodes matches node selector, one of them is tainted, CPU requests will fit, but pod Overhead is too high, should fail",
+			pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
+				pod.Spec.NodeSelector = map[string]string{
+					nodeLabelKey: nodeLabelValue,
+				}
+				pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+			}),
+			nodes: []*v1.Node{
+				test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
+					node.Spec.Taints = []v1.Taint{
+						{
+							Key:    nodeTaintKey,
+							Value:  nodeTaintValue,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					}
+				}),
+				// Notice that this node only has 5 CPU cores, the pod below requests 2 cores, but has CPU overhead of 2 cores, and the pod above requests 2 cores
+				test.BuildTestNode(nodeNames[1], 5000, 8*1000*1000*1000, 12, func(node *v1.Node) {
+					node.ObjectMeta.Labels = map[string]string{
+						nodeLabelKey: nodeLabelValue,
+					}
+
+					node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
+				}),
+				test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
+			},
+			podsOnNodes: []*v1.Pod{
+				test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
+					pod.ObjectMeta = metav1.ObjectMeta{
+						Namespace: "test",
+						Labels: map[string]string{
+							"test": "true",
+						},
+					}
+					pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
+					pod.Spec.Overhead = createResourceList(2000, 1000*1000*1000, 1000*1000*1000)
+				}),
+			},
+			success: false,
+		},
 	}
 
 	for _, tc := range tests {
-		actual := PodFitsAnyNode(tc.pod, tc.nodes)
-		if actual != tc.success {
-			t.Errorf("Test %#v failed", tc.description)
-		}
+		t.Run(tc.description, func(t *testing.T) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var objs []runtime.Object
+			for _, node := range tc.nodes {
+				objs = append(objs, node)
+			}
+			for _, pod := range tc.podsOnNodes {
+				objs = append(objs, pod)
+			}
+			objs = append(objs, tc.pod)
+
+			fakeClient := fake.NewSimpleClientset(objs...)
+
+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
+
+			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+			actual := PodFitsAnyOtherNode(getPodsAssignedToNode, tc.pod, tc.nodes)
+			if actual != tc.success {
+				t.Errorf("Test %#v failed", tc.description)
+			}
+		})
 	}
 }
+
+// createResourceList builds a small resource list of core resources
+func createResourceList(cpu, memory, ephemeralStorage int64) v1.ResourceList {
+	resourceList := make(map[v1.ResourceName]resource.Quantity)
+	resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
+	resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
+	resourceList[v1.ResourceEphemeralStorage] = *resource.NewQuantity(ephemeralStorage, resource.DecimalSI)
+	return resourceList
+}
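Both rewritten test loops adopt the t.Run subtest idiom, so each table entry is reported individually and can be selected with go test -run. The pattern in isolation, with placeholder names (a generic sketch, not code from this commit):

for _, tc := range tests {
	tc := tc // capture the range variable for the closure
	t.Run(tc.description, func(t *testing.T) {
		if got := doThing(tc.input); got != tc.want {
			t.Errorf("got %v, want %v", got, tc.want)
		}
	})
}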
@@ -17,39 +17,173 @@ limitations under the License.
 package pod
 
 import (
-	"context"
+	"sort"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
-	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/tools/cache"
 
 	"sigs.k8s.io/descheduler/pkg/utils"
-	"sort"
 )
 
-// ListPodsOnANode lists all of the pods on a node
-// It also accepts an optional "filter" function which can be used to further limit the pods that are returned.
-// (Usually this is podEvictor.IsEvictable, in order to only list the evictable pods on a node, but can
-// be used by strategies to extend IsEvictable if there are further restrictions, such as with NodeAffinity).
-// The filter function should return true if the pod should be returned from ListPodsOnANode
-func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, filter func(pod *v1.Pod) bool) ([]*v1.Pod, error) {
-	fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
-	if err != nil {
-		return []*v1.Pod{}, err
-	}
-
-	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
-		metav1.ListOptions{FieldSelector: fieldSelector.String()})
-	if err != nil {
-		return []*v1.Pod{}, err
-	}
-
-	pods := make([]*v1.Pod, 0)
-	for i := range podList.Items {
-		if filter != nil && !filter(&podList.Items[i]) {
-			continue
-		}
-		pods = append(pods, &podList.Items[i])
-	}
+const (
+	nodeNameKeyIndex = "spec.nodeName"
+)
+
+// FilterFunc is a filter for a pod.
+type FilterFunc func(*v1.Pod) bool
+
+// GetPodsAssignedToNodeFunc is a function which accepts a node name and a pod filter
+// function as input and returns the pods that are assigned to the node.
+type GetPodsAssignedToNodeFunc func(string, FilterFunc) ([]*v1.Pod, error)
+
+// WrapFilterFuncs wraps a set of FilterFuncs into one.
+func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
+	return func(pod *v1.Pod) bool {
+		for _, filter := range filters {
+			if filter != nil && !filter(pod) {
+				return false
+			}
+		}
+		return true
+	}
+}
+
+type Options struct {
+	filter             FilterFunc
+	includedNamespaces sets.String
+	excludedNamespaces sets.String
+	labelSelector      *metav1.LabelSelector
+}
+
+// NewOptions returns an empty Options.
+func NewOptions() *Options {
+	return &Options{}
+}
+
+// WithFilter sets a pod filter.
+// The filter function should return true if the pod should be returned from ListPodsOnANode.
+func (o *Options) WithFilter(filter FilterFunc) *Options {
+	o.filter = filter
+	return o
+}
+
+// WithNamespaces sets the included namespaces.
+func (o *Options) WithNamespaces(namespaces sets.String) *Options {
+	o.includedNamespaces = namespaces
+	return o
+}
+
+// WithoutNamespaces sets the excluded namespaces.
+func (o *Options) WithoutNamespaces(namespaces sets.String) *Options {
+	o.excludedNamespaces = namespaces
+	return o
+}
+
+// WithLabelSelector sets a pod label selector.
+func (o *Options) WithLabelSelector(labelSelector *metav1.LabelSelector) *Options {
+	o.labelSelector = labelSelector
+	return o
+}
+
+// BuildFilterFunc builds a final FilterFunc based on Options.
+func (o *Options) BuildFilterFunc() (FilterFunc, error) {
+	var s labels.Selector
+	var err error
+	if o.labelSelector != nil {
+		s, err = metav1.LabelSelectorAsSelector(o.labelSelector)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return func(pod *v1.Pod) bool {
+		if o.filter != nil && !o.filter(pod) {
+			return false
+		}
+		if len(o.includedNamespaces) > 0 && !o.includedNamespaces.Has(pod.Namespace) {
+			return false
+		}
+		if len(o.excludedNamespaces) > 0 && o.excludedNamespaces.Has(pod.Namespace) {
+			return false
+		}
+		if s != nil && !s.Matches(labels.Set(pod.GetLabels())) {
+			return false
+		}
+		return true
+	}, nil
+}
+
+// BuildGetPodsAssignedToNodeFunc establishes an indexer mapping pods to their assigned nodes.
+// It returns a function that uses the indexer to get all the pods assigned to a node.
+func BuildGetPodsAssignedToNodeFunc(podInformer cache.SharedIndexInformer) (GetPodsAssignedToNodeFunc, error) {
+	// Establish an indexer to map the pods and their assigned nodes.
+	err := podInformer.AddIndexers(cache.Indexers{
+		nodeNameKeyIndex: func(obj interface{}) ([]string, error) {
+			pod, ok := obj.(*v1.Pod)
+			if !ok {
+				return []string{}, nil
+			}
+			if len(pod.Spec.NodeName) == 0 {
+				return []string{}, nil
+			}
+			return []string{pod.Spec.NodeName}, nil
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// The indexer helps us get all the pods that are assigned to a node.
+	podIndexer := podInformer.GetIndexer()
+	getPodsAssignedToNode := func(nodeName string, filter FilterFunc) ([]*v1.Pod, error) {
+		objs, err := podIndexer.ByIndex(nodeNameKeyIndex, nodeName)
+		if err != nil {
+			return nil, err
+		}
+		pods := make([]*v1.Pod, 0, len(objs))
+		for _, obj := range objs {
+			pod, ok := obj.(*v1.Pod)
+			if !ok {
+				continue
+			}
+			if filter(pod) {
+				pods = append(pods, pod)
+			}
+		}
+		return pods, nil
+	}
+	return getPodsAssignedToNode, nil
+}
+
+// ListPodsOnANode lists all pods on a node.
+// It also accepts a "filter" function which can be used to further limit the pods that are returned.
+// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but it can
+// be used by strategies to extend it if there are further restrictions, such as with NodeAffinity.)
+func ListPodsOnANode(
+	nodeName string,
+	getPodsAssignedToNode GetPodsAssignedToNodeFunc,
+	filter FilterFunc,
+) ([]*v1.Pod, error) {
+	// Succeeded and failed pods are not considered because they don't occupy any resources.
+	f := func(pod *v1.Pod) bool {
+		return pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed
+	}
+	return ListAllPodsOnANode(nodeName, getPodsAssignedToNode, WrapFilterFuncs(f, filter))
+}
+
+// ListAllPodsOnANode lists all the pods on a node, regardless of the pods' phases.
+func ListAllPodsOnANode(
+	nodeName string,
+	getPodsAssignedToNode GetPodsAssignedToNodeFunc,
+	filter FilterFunc,
+) ([]*v1.Pod, error) {
+	pods, err := getPodsAssignedToNode(nodeName, filter)
+	if err != nil {
+		return []*v1.Pod{}, err
+	}
+
 	return pods, nil
 }
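The hunk above replaces the old client-backed ListPodsOnANode with an informer-backed node-name index plus a composable filter builder. Below is a minimal sketch of how a caller might wire the new API together; the kubeconfig loading, namespace set, label selector, and node name are illustrative assumptions rather than part of the diff, and the import path follows the repo's podutil convention:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func main() {
	// Assumption: a kubeconfig-based client; any rest.Config source works.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	podInformer := factory.Core().V1().Pods().Informer()

	// Register the spec.nodeName indexer before the informer starts;
	// client-go rejects indexers added to a running informer.
	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
	if err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// Compose a filter: only pods outside kube-system that match a label.
	filter, err := podutil.NewOptions().
		WithoutNamespaces(sets.NewString("kube-system")).
		WithLabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}).
		BuildFilterFunc()
	if err != nil {
		panic(err)
	}

	// "node-1" is a placeholder node name.
	pods, err := podutil.ListPodsOnANode("node-1", getPodsAssignedToNode, filter)
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d pods\n", len(pods))
}

Note the design choice the diff encodes: listing pods per node now hits the shared informer cache through an index instead of issuing a field-selector List against the API server for every strategy run.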
@@ -93,3 +227,10 @@ func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
 		return *pods[i].Spec.Priority < *pods[j].Spec.Priority
 	})
 }
+
+// SortPodsBasedOnAge sorts pods from oldest to most recent, in place.
+func SortPodsBasedOnAge(pods []*v1.Pod) {
+	sort.Slice(pods, func(i, j int) bool {
+		return pods[i].CreationTimestamp.Before(&pods[j].CreationTimestamp)
+	})
+}
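For completeness, a small sketch of the age-based sort added above; the fabricated pods and the import path are illustrative assumptions:

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// newPod fabricates a pod whose CreationTimestamp lies `age` in the past.
func newPod(name string, age time.Duration) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:              name,
			CreationTimestamp: metav1.NewTime(time.Now().Add(-age)),
		},
	}
}

func main() {
	pods := []*v1.Pod{
		newPod("young", 1*time.Hour),
		newPod("old", 48*time.Hour),
		newPod("middle", 6*time.Hour),
	}
	// Oldest first: prints "old", "middle", "young".
	podutil.SortPodsBasedOnAge(pods)
	for _, p := range pods {
		fmt.Println(p.Name)
	}
}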