Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)
Compare commits
793 Commits
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 46 lines)
@@ -0,0 +1,46 @@
---
name: Bug report
about: Create a bug report to help improve descheduler
title: ''
labels: 'kind/bug'
assignees: ''

---

<!-- Please answer these questions before submitting your bug report. Thanks! -->

**What version of descheduler are you using?**

descheduler version:


**Does this issue reproduce with the latest release?**


**Which descheduler CLI options are you using?**


**Please provide a copy of your descheduler policy config file**


**What k8s version are you using (`kubectl version`)?**

<details><summary><code>kubectl version</code> Output</summary><br><pre>
$ kubectl version

</pre></details>


**What did you do?**

<!--
If possible, provide a recipe for reproducing the error.
A detailed sequence of steps describing what to do to observe the issue is good.
A complete runnable bash shell script is best.
-->


**What did you expect to see?**


**What did you see instead?**
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 26 lines)
@@ -0,0 +1,26 @@
---
name: Feature request
about: Suggest an idea for descheduler
title: ''
labels: 'kind/feature'
assignees: ''

---

<!-- Please answer these questions before submitting your feature request. Thanks! -->

**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->

**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->

**Describe alternatives you've considered**
<!-- A clear and concise description of any alternative solutions or features you've considered. -->

**What version of descheduler are you using?**

descheduler version:

**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
.github/ISSUE_TEMPLATE/misc_request.md (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
---
name: Miscellaneous
about: Not a bug and not a feature
title: ''
labels: ''
assignees: ''

---

<!--
Please do not use this to submit a bug report or feature request. Use the
bug report or feature request options instead.

Also, please consider posting in the Kubernetes Slack #sig-scheduling channel
instead of opening an issue if this is a support request.

Thanks!
-->
.github/workflows/release.yaml (vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
name: Release Charts

on:
  push:
    branches:
      - release-*

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Install Helm
        uses: azure/setup-helm@v1
        with:
          version: v3.4.0

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.1.0
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
          CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"
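
The workflow above triggers on pushes to `release-*` branches, so cutting a chart release amounts to pushing such a branch. A minimal sketch (the branch name below is only an example):

```sh
# Pushing a branch that matches "release-*" triggers the chart release workflow.
git checkout -b release-1.23
git push origin release-1.23
```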
.gitignore (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
_output/
_tmp/
vendordiff.patch
.idea/
*.code-workspace
.vscode/
kind
.golangci.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
run:
  deadline: 2m

linters:
  disable-all: true
  enable:
    - gofmt
    - gosimple
    - gocyclo
    - misspell
    - govet

linters-settings:
  goimports:
    local-prefixes: sigs.k8s.io/descheduler

@@ -1,7 +0,0 @@
language: go
go:
- 1.8.3
script:
- hack/verify-gofmt.sh
- make build
- make test
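
The same configuration is picked up when the linter is run by hand. A minimal sketch, assuming `golangci-lint` is available on your `PATH` (the Makefile's `lint` target otherwise installs it under `_output/bin`):

```sh
# golangci-lint automatically reads .golangci.yml from the repository root.
golangci-lint run
```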
CONTRIBUTING.md (new file, 23 lines)
@@ -0,0 +1,23 @@
# Contributing Guidelines

Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:

_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._

## Getting Started

We have full documentation on how to get started contributing here:

- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet/README.md) - Common resources for existing developers

## Mentorship

- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!


## Contact Information

- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)
Dockerfile (18 changed lines)
@@ -11,10 +11,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.17.3

FROM fedora
WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
ARG ARCH
ARG VERSION
RUN VERSION=${VERSION} make build.$ARCH

MAINTAINER Avesh Agarwal <avagarwa@redhat.com>
FROM scratch

COPY _output/bin/descheduler /bin/descheduler
CMD ["/bin/descheduler --help"]
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>

USER 1000

COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler

CMD ["/bin/descheduler", "--help"]
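
The new multi-stage build compiles the binary with `make build.$ARCH` in the `golang` stage and copies it into a `scratch` final image, so the image must be built with the `ARCH` and `VERSION` build arguments. A minimal sketch (the tag and version value are examples; the Makefile's `image.*` targets wrap the same command):

```sh
# Build an amd64 descheduler image; VERSION is embedded into the binary via ldflags.
docker build --build-arg VERSION="v0.22.0" --build-arg ARCH="amd64" -t descheduler:dev .
```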
Dockerfile.dev (new file, 22 lines)
@@ -0,0 +1,22 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM scratch

MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>

USER 1000

COPY _output/bin/descheduler /bin/descheduler

CMD ["/bin/descheduler", "--help"]
Makefile (127 changed lines)
@@ -14,29 +14,134 @@

.PHONY: test

# VERSION is currently based on the last commit
VERSION=`git describe --tags`
COMMIT=`git rev-parse HEAD`
BUILD=`date +%FT%T%z`
LDFLAG_LOCATION=github.com/kubernetes-incubator/descheduler/cmd/descheduler/app
# VERSION is based on a date stamp plus the last commit
VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags --match "v*")
BRANCH?=$(shell git branch --show-current)
SHA1?=$(shell git rev-parse HEAD)
BUILD=$(shell date +%FT%T%z)
LDFLAG_LOCATION=sigs.k8s.io/descheduler/pkg/version
ARCHS = amd64 arm arm64

LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"

GOLANGCI_VERSION := v1.43.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)

# REGISTRY is the container registry to push
# into. The default is to push to the staging
# registry, not production.
REGISTRY?=gcr.io/k8s-staging-descheduler

# IMAGE is the image name of descheduler
# Should this be changed?
IMAGE:=descheduler:$(VERSION)

# IMAGE_GCLOUD is the image name of descheduler in the remote registry
IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)

# TODO: upload binaries to GCS bucket
#
# In the future binaries can be uploaded to
# GCS bucket gs://k8s-staging-descheduler.

HAS_HELM := $(shell which helm 2> /dev/null)

all: build

build:
	go build ${LDFLAGS} -o _output/bin/descheduler github.com/kubernetes-incubator/descheduler/cmd/descheduler
	CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler

image: build
	docker build -t $(IMAGE) .
build.amd64:
	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler

build.arm:
	CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler

build.arm64:
	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler

dev-image: build
	docker build -f Dockerfile.dev -t $(IMAGE) .

image:
	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .

image.amd64:
	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .

image.arm:
	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm" -t $(IMAGE)-arm .

image.arm64:
	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .

push: image
	gcloud auth configure-docker
	docker tag $(IMAGE) $(IMAGE_GCLOUD)
	docker push $(IMAGE_GCLOUD)

push-all: image.amd64 image.arm image.arm64
	gcloud auth configure-docker
	for arch in $(ARCHS); do \
		docker tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
		docker push $(IMAGE_GCLOUD)-$${arch} ;\
	done
	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
	for arch in $(ARCHS); do \
		DOCKER_CLI_EXPERIMENTAL=enabled docker manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
	done
	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(IMAGE_GCLOUD) ;\

clean:
	rm -rf _output
	rm -rf _tmp

test:
verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-toc verify-gen

verify-govet:
	./hack/verify-govet.sh

verify-spelling:
	./hack/verify-spelling.sh

verify-gofmt:
	./hack/verify-gofmt.sh

verify-vendor:
	./hack/verify-vendor.sh

verify-toc:
	./hack/verify-toc.sh

test-unit:
	./test/run-unit-tests.sh

test-e2e:
	./test/run-e2e-tests.sh

gen:
	./hack/update-generated-conversions.sh
	./hack/update-generated-deep-copies.sh
	./hack/update-generated-defaulters.sh
	./hack/update-toc.sh

verify-gen:
	./hack/verify-conversions.sh
	./hack/verify-deep-copies.sh
	./hack/verify-defaulters.sh

lint:
ifndef HAS_GOLANGCI
	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
endif
	./_output/bin/golangci-lint run

lint-chart: ensure-helm-install
	helm lint ./charts/descheduler

test-helm: ensure-helm-install
	./test/run-helm-tests.sh

ensure-helm-install:
ifndef HAS_HELM
	curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
endif
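
Typical local use of the targets above looks like the following sketch; the `REGISTRY` variable can be overridden in the same way for the `push` and `push-all` targets:

```sh
# Cross-compile the descheduler binary and build the matching image for one architecture.
make build.amd64
make image.amd64
```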
OWNERS (16 changed lines)
@@ -1,8 +1,16 @@
approvers:
- aveshagarwal
- ravisantoshgudimetla
- jayunit100
- damemi
- ingvagabund
- seanmalloy
reviewers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
- damemi
- seanmalloy
- ingvagabund
- lixiang233
emeritus_approvers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
- jayunit100
README.md (875 changed lines)
@@ -1,13 +1,14 @@
# Descheduler for Kubernetes
[Go Report Card](https://goreportcard.com/report/sigs.k8s.io/descheduler)

## Introduction
# Descheduler for Kubernetes

Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
pod can or can not be scheduled, are guided by its configurable policy which comprises of set of
rules, called predicates and priorities. The scheduler's decisions are influenced by its view of
a Kubernetes cluster at that point of time when a new pod appears first time for scheduling.
As Kubernetes clusters are very dynamic and their state change over time, there may be desired
a Kubernetes cluster at that point of time when a new pod appears for scheduling.
As Kubernetes clusters are very dynamic and their state changes over time, there may be desire
to move already running pods to some other nodes for various reasons:

* Some nodes are under or over utilized.
@@ -21,151 +22,176 @@ Descheduler, based on its policy, finds pods that can be moved and evicts them.
note, in current implementation, descheduler does not schedule replacement of evicted pods
but relies on the default scheduler for that.

## Build and Run
Table of Contents
=================
<!-- toc -->
- [Quick Start](#quick-start)
  - [Run As A Job](#run-as-a-job)
  - [Run As A CronJob](#run-as-a-cronjob)
  - [Run As A Deployment](#run-as-a-deployment)
  - [Install Using Helm](#install-using-helm)
  - [Install Using Kustomize](#install-using-kustomize)
- [User Guide](#user-guide)
- [Policy and Strategies](#policy-and-strategies)
  - [RemoveDuplicates](#removeduplicates)
  - [LowNodeUtilization](#lownodeutilization)
  - [HighNodeUtilization](#highnodeutilization)
  - [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
  - [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
  - [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
  - [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
  - [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
  - [PodLifeTime](#podlifetime)
  - [RemoveFailedPods](#removefailedpods)
- [Filter Pods](#filter-pods)
  - [Namespace filtering](#namespace-filtering)
  - [Priority filtering](#priority-filtering)
  - [Label filtering](#label-filtering)
  - [Node Fit filtering](#node-fit-filtering)
- [Pod Evictions](#pod-evictions)
  - [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
- [Metrics](#metrics)
- [Compatibility Matrix](#compatibility-matrix)
- [Getting Involved and Contributing](#getting-involved-and-contributing)
  - [Communicating With Contributors](#communicating-with-contributors)
- [Roadmap](#roadmap)
- [Code of conduct](#code-of-conduct)
<!-- /toc -->

Build descheduler:
## Quick Start

```sh
$ make
```
The descheduler can be run as a `Job`, `CronJob`, or `Deployment` inside of a k8s cluster. It has the
advantage of being able to be run multiple times without needing user intervention.
The descheduler pod is run as a critical pod in the `kube-system` namespace to avoid
being evicted by itself or by the kubelet.

and run descheduler:

```sh
$ ./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file>
```

For more information about available options run:
```
$ ./_output/bin/descheduler --help
```

## Running Descheduler as a Job Inside of a Pod

Descheduler can be run as a job inside of a pod. It has the advantage of
being able to be run multiple times without needing user intervention.
Descheduler pod is run as a critical pod to avoid being evicted by itself,
or by kubelet due to an eviction event. Since critical pods are created in
`kube-system` namespace, descheduler job and its pod will also be created
in `kube-system` namespace.

### Create a container image

First we create a simple Docker image utilizing the Dockerfile found in the root directory:
### Run As A Job

```
$ make image
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/job/job.yaml
```

### Create a cluster role

To give necessary permissions for the descheduler to work in a pod, create a cluster role:
### Run As A CronJob

```
$ cat << EOF| kubectl create -f -
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: descheduler-cluster-role
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "watch", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "delete"]
EOF
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/cronjob/cronjob.yaml
```

### Create the service account which will be used to run the job:
### Run As A Deployment

```
$ kubectl create sa descheduler-sa -n kube-system
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/deployment/deployment.yaml
```

### Bind the cluster role to the service account:
### Install Using Helm

```
$ kubectl create clusterrolebinding descheduler-cluster-role-binding \
    --clusterrole=descheduler-cluster-role \
    --serviceaccount=kube-system:descheduler-sa
```
### Create a configmap to store descheduler policy
Starting with release v0.18.0 there is an official helm chart that can be used to install the
descheduler. See the [helm chart README](https://github.com/kubernetes-sigs/descheduler/blob/master/charts/descheduler/README.md) for detailed instructions.

Descheduler policy is created as a ConfigMap in `kube-system` namespace
so that it can be mounted as a volume inside pod.
The descheduler helm chart is also listed on the [artifact hub](https://artifacthub.io/packages/helm/descheduler/descheduler).

```
$ kubectl create configmap descheduler-policy-configmap \
    -n kube-system --from-file=<path-to-policy-dir/policy.yaml>
```
### Create the job specification (descheduler-job.yaml)
### Install Using Kustomize

You can use kustomize to install descheduler.
See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/resource/) for detailed instructions.

Run As A Job
```
apiVersion: batch/v1
kind: Job
metadata:
  name: descheduler-job
  namespace: kube-system
spec:
  parallelism: 1
  completions: 1
  template:
    metadata:
      name: descheduler-pod
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: "true"
    spec:
      containers:
      - name: descheduler
        image: descheduler
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume
        command:
        - "/bin/sh"
        - "-ec"
        - |
          /bin/descheduler --policy-config-file /policy-dir/policy.yaml
      restartPolicy: "Never"
      serviceAccountName: descheduler-sa
      volumes:
      - name: policy-volume
        configMap:
          name: descheduler-policy-configmap
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.22.0' | kubectl apply -f -
```

Please note that the pod template is configured with critical pod annotation, and
the policy `policy-file` is mounted as a volume from the config map.
Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.22.0' | kubectl apply -f -
```

### Run the descheduler as a job in a pod:
Run As A Deployment
```
$ kubectl create -f descheduler-job.yaml
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.22.0' | kubectl apply -f -
```

## User Guide

See the [user guide](docs/user-guide.md) in the `/docs` directory.

## Policy and Strategies

Descheduler's policy is configurable and includes strategies to be enabled or disabled.
Two strategies, `RemoveDuplicates` and `LowNodeUtilization` are currently implemented.
As part of the policy, the parameters associated with the strategies can be configured too.
By default, all strategies are enabled.

Descheduler's policy is configurable and includes strategies that can be enabled or disabled. By default, all strategies are enabled.

The policy includes a common configuration that applies to all the strategies:
| Name | Default Value | Description |
|------|---------------|-------------|
| `nodeSelector` | `nil` | limiting the nodes which are processed |
| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
| `evictFailedBarePods` | `false` | allow eviction of pods without owner references and in failed phase |

As part of the policy, the parameters associated with each strategy can be configured.
See each strategy for details on available parameters.

**Policy:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictFailedBarePods: false
evictLocalStoragePods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false
strategies:
  ...
```

The following diagram provides a visualization of most of the strategies to help
categorize how strategies fit together.

### RemoveDuplicates

This strategy makes sure that there is only one pod associated with a Replica Set (RS),
Replication Controller (RC), Deployment, or Job running on same node. If there are more,
This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
ReplicationController (RC), StatefulSet, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This issue could happen
if some nodes went down due to whatever reasons, and pods on them were moved to other nodes leading to
more than one pod associated with RS or RC, for example, running on same node. Once the failed nodes
are ready again, this strategy could be enabled to evict those duplicate pods. Currently, there are no
parameters associated with this strategy. To disable this strategy, the policy would look like:
more than one pod associated with a RS or RC, for example, running on the same node. Once the failed nodes
are ready again, this strategy could be enabled to evict those duplicate pods.

```
It provides one optional parameter, `excludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction. Note that
pods created by Deployments are considered for eviction by this strategy. The `excludeOwnerKinds` parameter
should include `ReplicaSet` to have pods created by Deployments excluded.

**Parameters:**

|Name|Type|
|---|---|
|`excludeOwnerKinds`|list(string)|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: false
    enabled: true
    params:
      removeDuplicates:
        excludeOwnerKinds:
        - "ReplicaSet"
```

### LowNodeUtilization
@@ -174,20 +200,38 @@ This strategy finds nodes that are under utilized and evicts pods, if possible,
in the hope that recreation of evicted pods will be scheduled on these underutilized nodes. The
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.

The under utilization of nodes is determined by a configurable threshold, `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, and number of pods in terms of percentage. If a node's
usage is below threshold for all (cpu, memory, and number of pods), the node is considered underutilized.
Currently, pods' request resource requirements are considered for computing node resource utilization.
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage (the percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node).

If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods request resource requirements are considered for computing node resource utilization.

There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
from where pods could be evicted. Any node, between the thresholds, `thresholds` and `targetThresholds` is
from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, number of pods, or extended resources),
the node is considered over utilized. Any node between the thresholds, `thresholds` and `targetThresholds` is
considered appropriately utilized and is not considered for eviction. The threshold, `targetThresholds`,
can be configured for cpu, memory, and number of pods too in terms of percentage.

These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements.
An example of the policy for this strategy would look like:
These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements. Note that this
strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
(those with usage below `thresholds`), it will abort if any number of `underutilized nodes` or `overutilized nodes` is zero.

```
**Parameters:**

|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`targetThresholds`|map(string:int)|
|`numberOfNodes`|int|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
@@ -205,43 +249,594 @@ strategies:
      "pods": 50
```

There is another parameter associated with `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when number of under utilized nodes
Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`.
  If any of these resource types is not specified, all its thresholds default to 100% to avoid nodes going from underutilized to overutilized.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional,
  and will not be used to compute node's usage if it's not specified in `thresholds` and `targetThresholds` explicitly.
* `thresholds` or `targetThresholds` can not be nil and they must configure exactly the same types of resources.
* The valid range of the resource's percentage value is \[0, 100\]
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.

There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
are above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
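
For illustration, a sketch of the strategy with `numberOfNodes` set so that eviction only starts once at least ten nodes are underutilized (all threshold values here are arbitrary examples):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50
        numberOfNodes: 10
```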

### HighNodeUtilization

This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
trigger down scaling of under utilized nodes.
This strategy **must** be used with the scheduler strategy `MostRequestedPriority`. The parameters of this strategy are
configured under `nodeResourceUtilizationThresholds`.

The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node.

If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods request resource requirements are considered for computing node resource utilization.
Any node above `thresholds` is considered appropriately utilized and is not considered for eviction.

The `thresholds` param could be tuned as per your cluster requirements. Note that this
strategy evicts pods from `underutilized nodes` (those with usage below `thresholds`)
so that they can be recreated in appropriately utilized nodes.
The strategy will abort if any number of `underutilized nodes` or `appropriately utilized nodes` is zero.

**Parameters:**

|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`numberOfNodes`|int|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "HighNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
```

Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`. If any of these resource types is not specified, all its thresholds default to 100%.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional, and will not be used to compute node's usage if it's not specified in `thresholds` explicitly.
* `thresholds` can not be nil.
* The valid range of the resource's percentage value is \[0, 100\]

There is another parameter associated with the `HighNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
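
Because this strategy assumes the scheduler packs pods onto the most-utilized nodes (the `MostRequestedPriority` requirement above), the cluster scheduler must score nodes accordingly. A sketch of such a configuration, assuming a kube-scheduler version that supports the `v1beta2` component config and the `NodeResourcesFit` `MostAllocated` scoring strategy (adjust the API version to your cluster):

```yaml
apiVersion: kubescheduler.config.k8s.io/v1beta2
kind: KubeSchedulerConfiguration
profiles:
- schedulerName: default-scheduler
  pluginConfig:
  - name: NodeResourcesFit
    args:
      scoringStrategy:
        type: MostAllocated
        resources:
        - name: cpu
          weight: 1
        - name: memory
          weight: 1
```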

### RemovePodsViolatingInterPodAntiAffinity

This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example,
if there is podA on a node and podB and podC (running on the same node) have anti-affinity rules which prohibit
them to run on the same node, then podA will be evicted from the node so that podB and podC could run. This
issue could happen, when the anti-affinity rules for podB and podC are created when they are already running on
node.

**Parameters:**

|Name|Type|
|---|---|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: true
```

### RemovePodsViolatingNodeAffinity

This strategy makes sure all pods violating
[node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)
are eventually removed from nodes. Node affinity rules allow a pod to specify
`requiredDuringSchedulingIgnoredDuringExecution` type, which tells the scheduler
to respect node affinity when scheduling the pod but kubelet to ignore
in case node changes over time and no longer respects the affinity.
When enabled, the strategy serves as a temporary implementation
of `requiredDuringSchedulingRequiredDuringExecution` and evicts pod for kubelet
that no longer respects node affinity.

For example, there is podA scheduled on nodeA which satisfies the node
affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time
of scheduling. Over time nodeA stops to satisfy the rule. When the strategy gets
executed and there is another node available that satisfies the node affinity rule,
podA gets evicted from nodeA.
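
For reference, the kind of rule this strategy re-evaluates looks like the following in a pod spec (the label key and value are illustrative only):

```yaml
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
      - matchExpressions:
        - key: example.com/dedicated   # example label key
          operator: In
          values:
          - "descheduler-demo"         # example label value
```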

**Parameters:**

|Name|Type|
|---|---|
|`nodeAffinityType`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
```

### RemovePodsViolatingNodeTaints

This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example there is a
pod "podA" with a toleration to tolerate a taint ``key=value:NoSchedule`` scheduled and running on the tainted
node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations
and will be evicted.
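
For reference, such a pod carries a toleration like the sketch below (key and value mirror the example above); if the node's taint is later changed so that this toleration no longer matches, the strategy evicts the pod:

```yaml
tolerations:
- key: "key"
  operator: "Equal"
  value: "value"
  effect: "NoSchedule"
```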

**Parameters:**

|Name|Type|
|---|---|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

````yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
````

### RemovePodsViolatingTopologySpreadConstraint

This strategy makes sure that pods violating [topology spread constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.

By default, this strategy only deals with hard constraints, setting parameter `includeSoftConstraints` to `true` will
include soft constraints.

Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.
|
||||
**Parameters:**
|
||||
|
||||
|Name|Type|
|
||||
|---|---|
|
||||
|`includeSoftConstraints`|bool|
|
||||
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||
|`labelSelector`|(see [label filtering](#label-filtering))|
|
||||
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"RemovePodsViolatingTopologySpreadConstraint":
|
||||
enabled: true
|
||||
params:
|
||||
includeSoftConstraints: false
|
||||
```
|
||||
|
||||
|
||||
### RemovePodsHavingTooManyRestarts
|
||||
|
||||
This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that
|
||||
can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters
|
||||
include `podRestartThreshold`, which is the number of restarts (summed over all eligible containers) at which a pod
|
||||
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
|
||||
into that calculation.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
|Name|Type|
|
||||
|---|---|
|
||||
|`podRestartThreshold`|int|
|
||||
|`includingInitContainers`|bool|
|
||||
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||
|`labelSelector`|(see [label filtering](#label-filtering))|
|
||||
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"RemovePodsHavingTooManyRestarts":
|
||||
enabled: true
|
||||
params:
|
||||
podsHavingTooManyRestarts:
|
||||
podRestartThreshold: 100
|
||||
includingInitContainers: true
|
||||
```
|
||||
|
||||
### PodLifeTime
|
||||
|
||||
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
|
||||
|
||||
You can also specify `podStatusPhases` to `only` evict pods with specific `StatusPhases`, currently this parameter is limited
|
||||
to `Running` and `Pending`.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
|Name|Type|
|
||||
|---|---|
|
||||
|`maxPodLifeTimeSeconds`|int|
|
||||
|`podStatusPhases`|list(string)|
|
||||
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||
|`labelSelector`|(see [label filtering](#label-filtering))|
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 86400
|
||||
podStatusPhases:
|
||||
- "Pending"
|
||||
```
|
||||
|
||||
### RemoveFailedPods
|
||||
|
||||
This strategy evicts pods that are in failed status phase.
|
||||
You can provide an optional parameter to filter by failed `reasons`.
|
||||
`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
|
||||
You can specify an optional parameter `minPodLifetimeSeconds` to evict pods that are older than specified seconds.
|
||||
Lastly, you can specify the optional parameter `excludeOwnerKinds` and if a pod
|
||||
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
|Name|Type|
|
||||
|---|---|
|
||||
|`minPodLifetimeSeconds`|uint|
|
||||
|`excludeOwnerKinds`|list(string)|
|
||||
|`reasons`|list(string)|
|
||||
|`includingInitContainers`|bool|
|
||||
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||
|`labelSelector`|(see [label filtering](#label-filtering))|
|
||||
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"RemoveFailedPods":
|
||||
enabled: true
|
||||
params:
|
||||
failedPods:
|
||||
reasons:
|
||||
- "NodeAffinity"
|
||||
includingInitContainers: true
|
||||
excludeOwnerKinds:
|
||||
- "Job"
|
||||
minPodLifetimeSeconds: 3600
|
||||
```
|
||||
|
||||
## Filter Pods
|
||||
|
||||
### Namespace filtering
|
||||
|
||||
The following strategies accept a `namespaces` parameter which allows to specify a list of including, resp. excluding namespaces:
|
||||
* `PodLifeTime`
|
||||
* `RemovePodsHavingTooManyRestarts`
|
||||
* `RemovePodsViolatingNodeTaints`
|
||||
* `RemovePodsViolatingNodeAffinity`
|
||||
* `RemovePodsViolatingInterPodAntiAffinity`
|
||||
* `RemoveDuplicates`
|
||||
* `RemovePodsViolatingTopologySpreadConstraint`
|
||||
* `RemoveFailedPods`
|
||||
|
||||
For example:
|
||||
|
||||
```yaml
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 86400
|
||||
namespaces:
|
||||
include:
|
||||
- "namespace1"
|
||||
- "namespace2"
|
||||
```
|
||||
|
||||
In the examples `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
|
||||
The similar holds for `exclude` field:
|
||||
|
||||
```yaml
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 86400
|
||||
namespaces:
|
||||
exclude:
|
||||
- "namespace1"
|
||||
- "namespace2"
|
||||
```
|
||||
|
||||
The strategy gets executed over all namespaces but `namespace1` and `namespace2`.
|
||||
|
||||
It's not allowed to compute `include` with `exclude` field.
|
||||
|
||||
### Priority filtering
|
||||
|
||||
All strategies are able to configure a priority threshold, only pods under the threshold can be evicted. You can
|
||||
specify this threshold by setting `thresholdPriorityClassName`(setting the threshold to the value of the given
|
||||
priority class) or `thresholdPriority`(directly setting the threshold) parameters. By default, this threshold
|
||||
is set to the value of `system-cluster-critical` priority class.
|
||||
|
||||
Note: Setting `evictSystemCriticalPods` to true disables priority filtering entirely.
|
||||
|
||||
E.g.
|
||||
|
||||
Setting `thresholdPriority`
|
||||
```yaml
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 86400
|
||||
thresholdPriority: 10000
|
||||
```
|
||||
|
||||
Setting `thresholdPriorityClassName`
|
||||
```yaml
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 86400
|
||||
thresholdPriorityClassName: "priorityclass1"
|
||||
```
|
||||
|
||||
Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`, if the given priority class
|
||||
does not exist, descheduler won't create it and will throw an error.
|
||||
|
||||
### Label filtering
|
||||
|
||||
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta)
|
||||
to filter pods by their labels:
|
||||
|
||||
* `PodLifeTime`
|
||||
* `RemovePodsHavingTooManyRestarts`
|
||||
* `RemovePodsViolatingNodeTaints`
|
||||
* `RemovePodsViolatingNodeAffinity`
|
||||
* `RemovePodsViolatingInterPodAntiAffinity`
|
||||
* `RemovePodsViolatingTopologySpreadConstraint`
|
||||
* `RemoveFailedPods`
|
||||
|
||||
This allows running strategies among pods the descheduler is interested in.
|
||||
|
||||
For example:
|
||||
|
||||
```yaml
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 86400
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
component: redis
|
||||
matchExpressions:
|
||||
- {key: tier, operator: In, values: [cache]}
|
||||
- {key: environment, operator: NotIn, values: [dev]}
|
||||
```
|
||||
|
||||
|
||||
### Node Fit filtering
|
||||
|
||||
The following strategies accept a `nodeFit` boolean parameter which can optimize descheduling:
|
||||
* `RemoveDuplicates`
|
||||
* `LowNodeUtilization`
|
||||
* `HighNodeUtilization`
|
||||
* `RemovePodsViolatingInterPodAntiAffinity`
|
||||
* `RemovePodsViolatingNodeAffinity`
|
||||
* `RemovePodsViolatingNodeTaints`
|
||||
* `RemovePodsViolatingTopologySpreadConstraint`
|
||||
* `RemovePodsHavingTooManyRestarts`
|
||||
* `RemoveFailedPods`
|
||||
|
||||
If set to `true` the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently the following criteria are considered when setting `nodeFit` to `true`:
|
||||
- A `nodeSelector` on the pod
|
||||
- Any `Tolerations` on the pod and any `Taints` on the other nodes
|
||||
- `nodeAffinity` on the pod
|
||||
- Whether any of the other nodes are marked as `unschedulable`
|
||||
|
||||
E.g.
|
||||
|
||||
```yaml
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"LowNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"cpu" : 20
|
||||
"memory": 20
|
||||
"pods": 20
|
||||
targetThresholds:
|
||||
"cpu" : 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
nodeFit: true
|
||||
```
|
||||
|
||||
Note that node fit filtering references the current pod spec, and not that of it's owner.
|
||||
Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
|
||||
the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
|
||||
This is expected behavior as the descheduler is a "best-effort" mechanism.
|
||||
|
||||
Using Deployments instead of ReplicationControllers provides an automated rollout of pod spec changes, therefore ensuring that the descheduler has an up-to-date view of the cluster state.
|
||||
|
||||
## Pod Evictions
|
||||
|
||||
When the descheduler decides to evict pods from a node, it employs following general mechanism:
|
||||
When the descheduler decides to evict pods from a node, it employs the following general mechanism:
|
||||
|
||||
* Critical pods (with annotations scheduler.alpha.kubernetes.io/critical-pod) are never evicted.
|
||||
* Pods (static or mirrored pods or stand alone pods) not part of an RC, RS, Deployment or Jobs are
|
||||
never evicted because these pods won't be recreated.
|
||||
* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
|
||||
* Pods (static or mirrored pods or standalone pods) not part of an ReplicationController, ReplicaSet(Deployment), StatefulSet, or Job are
|
||||
never evicted because these pods won't be recreated. (Standalone pods in failed status phase can be evicted by setting `evictFailedBarePods: true`)
|
||||
* Pods associated with DaemonSets are never evicted.
|
||||
* Pods with local storage are never evicted.
|
||||
* Best efforts pods are evicted before Burstable and Guaranteed pods.
|
||||
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
|
||||
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
|
||||
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
|
||||
best effort pods are evicted before burstable and guaranteed pods.
|
||||
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
|
||||
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
|
||||
Users should know how and if the pod will be recreated.
|
||||
* Pods with a non-nil DeletionTimestamp are not evicted by default.
|
||||
|
||||
### Pod disruption Budget (PDB)
|
||||
Pods subject to Pod Disruption Budget (PDB) are not evicted if descheduling violates its pod
|
||||
disruption budget (PDB). The pods are evicted by using eviction subresource to handle PDB.
|
||||
Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.
|
||||
|
||||
### Pod Disruption Budget (PDB)
|
||||
|
||||
Pods subject to a Pod Disruption Budget(PDB) are not evicted if descheduling violates its PDB. The pods
|
||||
are evicted by using the eviction subresource to handle PDB.
|
||||
|
||||
## Metrics
|
||||
|
||||
| name | type | description |
|
||||
|-------|-------|----------------|
|
||||
| build_info | gauge | constant 1 |
|
||||
| pods_evicted | CounterVec | total number of pods evicted |
|
||||
|
||||
The metrics are served through https://localhost:10258/metrics by default.
|
||||
The address and port can be changed by setting `--binding-address` and `--secure-port` flags.
|
||||
|
||||
## Compatibility Matrix
|
||||
The below compatibility matrix shows the k8s client package(client-go, apimachinery, etc) versions that descheduler
|
||||
is compiled with. At this time descheduler does not have a hard dependency to a specific k8s release. However a
|
||||
particular descheduler release is only tested against the three latest k8s minor versions. For example descheduler
|
||||
v0.18 should work with k8s v1.18, v1.17, and v1.16.
|
||||
|
||||
Starting with descheduler release v0.18 the minor version of descheduler matches the minor version of the k8s client
|
||||
packages that it is compiled with.
|
||||
|
||||
Descheduler | Supported Kubernetes Version
|
||||
-------------|-----------------------------
|
||||
v0.22 | v1.22
|
||||
v0.21 | v1.21
|
||||
v0.20 | v1.20
|
||||
v0.19 | v1.19
|
||||
v0.18 | v1.18
|
||||
v0.10 | v1.17
|
||||
v0.4-v0.9 | v1.9+
|
||||
v0.1-v0.3 | v1.7-v1.8
|
||||
|
||||
|
||||
## Getting Involved and Contributing
|
||||
|
||||
Are you interested in contributing to descheduler? We, the
|
||||
maintainers and community, would love your suggestions, contributions, and help!
|
||||
Also, the maintainers can be contacted at any time to learn more about how to get
|
||||
involved.
|
||||
|
||||
To get started writing code see the [contributor guide](docs/contributor-guide.md) in the `/docs` directory.
|
||||
|
||||
In the interest of getting more new people involved we tag issues with
|
||||
[`good first issue`][good_first_issue].
|
||||
These are typically issues that have smaller scope but are good ways to start
|
||||
to get acquainted with the codebase.
|
||||
|
||||
We also encourage ALL active community participants to act as if they are
|
||||
maintainers, even if you don't have "official" write permissions. This is a
|
||||
community effort, we are here to serve the Kubernetes community. If you have an
|
||||
active interest and you want to get involved, you have real power! Don't assume
|
||||
that the only people who can get things done around here are the "maintainers".
|
||||
|
||||
We also would love to add more "official" maintainers, so show us what you can
|
||||
do!
|
||||
|
||||
This repository uses the Kubernetes bots. See a full list of the commands [here][prow].
|
||||
|
||||
### Communicating With Contributors
|
||||
|
||||
You can reach the contributors of this project at:
|
||||
|
||||
- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
|
||||
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)
|
||||
|
||||
Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
|
||||
|
||||
## Roadmap
|
||||
|
||||
This roadmap is not in any particular order.
|
||||
|
||||
* Addition of test cases (unit and end-to-end)
|
||||
* Ability to run inside a pod as a job
|
||||
* Strategy to consider taints and tolerations
|
||||
* Consideration of pod affinity and anti-affinity
|
||||
* Strategy to consider pod life time
|
||||
* Consideration of pod affinity
|
||||
* Strategy to consider number of pending pods
|
||||
* Integration with cluster autoscaler
|
||||
* Integration with metrics providers for obtaining real load metrics
|
||||
* Consideration of Kubernetes's scheduler's predicates
|
||||
|
||||
|
||||
## Note
|
||||
### Code of conduct
|
||||
|
||||
This project is under active development, and is not intended for production use.
|
||||
Any api could be changed any time with out any notice. That said, your feedback is
|
||||
very important and appreciated to make this project more stable and useful.
|
||||
Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
|
||||
|
||||
15
SECURITY_CONTACTS
Normal file
15
SECURITY_CONTACTS
Normal file
@@ -0,0 +1,15 @@
|
||||
# Defined below are the security contacts for this repo.
|
||||
#
|
||||
# They are the contact point for the Product Security Team to reach out
|
||||
# to for triaging and handling of incoming issues.
|
||||
#
|
||||
# The below names agree to abide by the
|
||||
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
|
||||
# and will be removed and replaced if they violate that agreement.
|
||||
#
|
||||
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
||||
# INSTRUCTIONS AT https://kubernetes.io/security/
|
||||
|
||||
aveshagarwal
|
||||
k82cn
|
||||
ravisantoshgudimetla
|
||||
22
charts/descheduler/.helmignore
Normal file
22
charts/descheduler/.helmignore
Normal file
@@ -0,0 +1,22 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
16
charts/descheduler/Chart.yaml
Normal file
16
charts/descheduler/Chart.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
name: descheduler
|
||||
version: 0.23.0
|
||||
appVersion: 0.23.0
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
maintainers:
|
||||
- name: Kubernetes SIG Scheduling
|
||||
email: kubernetes-sig-scheduling@googlegroups.com
|
||||
72
charts/descheduler/README.md
Normal file
72
charts/descheduler/README.md
Normal file
@@ -0,0 +1,72 @@
|
||||
# Descheduler for Kubernetes
|
||||
|
||||
[Descheduler](https://github.com/kubernetes-sigs/descheduler/) for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
|
||||
|
||||
## TL;DR:
|
||||
|
||||
```shell
|
||||
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
|
||||
helm install my-release --namespace kube-system descheduler/descheduler
|
||||
```
|
||||
|
||||
## Introduction
|
||||
|
||||
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Kubernetes 1.14+
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```shell
|
||||
helm install --namespace kube-system my-release descheduler/descheduler
|
||||
```
|
||||
|
||||
The command deploys _descheduler_ on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
|
||||
> **Tip**: List all releases using `helm list`
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
```shell
|
||||
helm delete my-release
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following table lists the configurable parameters of the _descheduler_ chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
| ------------------------------ | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
|
||||
| `kind` | Use as CronJob or Deployment | `CronJob` |
|
||||
| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
|
||||
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
|
||||
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
|
||||
| `imagePullSecrets` | Docker repository secrets | `[]` |
|
||||
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
|
||||
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
|
||||
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
|
||||
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
|
||||
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
|
||||
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
|
||||
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
|
||||
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
|
||||
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
|
||||
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
|
||||
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
|
||||
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
|
||||
| `podSecurityPolicy.create` | If `true`, create PodSecurityPolicy | `true` |
|
||||
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
|
||||
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
|
||||
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
|
||||
| `nodeSelector` | Node selectors to run the descheduler cronjob on specific nodes | `nil` |
|
||||
| `tolerations` | tolerations to run the descheduler cronjob on specific nodes | `nil` |
|
||||
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
|
||||
| `commonLabels` | Labels to apply to all resources | `{}` |
|
||||
1
charts/descheduler/templates/NOTES.txt
Normal file
1
charts/descheduler/templates/NOTES.txt
Normal file
@@ -0,0 +1 @@
|
||||
Descheduler installed as a {{ .Values.kind }} .
|
||||
67
charts/descheduler/templates/_helpers.tpl
Normal file
67
charts/descheduler/templates/_helpers.tpl
Normal file
@@ -0,0 +1,67 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "descheduler.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "descheduler.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "descheduler.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "descheduler.labels" -}}
|
||||
app.kubernetes.io/name: {{ include "descheduler.name" . }}
|
||||
helm.sh/chart: {{ include "descheduler.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- if .Values.commonLabels}}
|
||||
{{ toYaml .Values.commonLabels }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "descheduler.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "descheduler.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "descheduler.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
{{ default (include "descheduler.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
34
charts/descheduler/templates/clusterrole.yaml
Normal file
34
charts/descheduler/templates/clusterrole.yaml
Normal file
@@ -0,0 +1,34 @@
|
||||
{{- if .Values.rbac.create -}}
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "watch", "list", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/eviction"]
|
||||
verbs: ["create"]
|
||||
- apiGroups: ["scheduling.k8s.io"]
|
||||
resources: ["priorityclasses"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
{{- if .Values.podSecurityPolicy.create }}
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
verbs: ['use']
|
||||
resourceNames:
|
||||
- {{ template "descheduler.fullname" . }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
16
charts/descheduler/templates/clusterrolebinding.yaml
Normal file
16
charts/descheduler/templates/clusterrolebinding.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
{{- if .Values.rbac.create -}}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "descheduler.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end -}}
|
||||
11
charts/descheduler/templates/configmap.yaml
Normal file
11
charts/descheduler/templates/configmap.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
data:
|
||||
policy.yaml: |
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
|
||||
94
charts/descheduler/templates/cronjob.yaml
Normal file
94
charts/descheduler/templates/cronjob.yaml
Normal file
@@ -0,0 +1,94 @@
|
||||
{{- if eq .Values.kind "CronJob" }}
|
||||
apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
spec:
|
||||
schedule: {{ .Values.schedule | quote }}
|
||||
{{- if .Values.suspend }}
|
||||
suspend: {{ .Values.suspend }}
|
||||
{{- end }}
|
||||
concurrencyPolicy: "Forbid"
|
||||
{{- if .Values.startingDeadlineSeconds }}
|
||||
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
|
||||
{{- end }}
|
||||
{{- if .Values.successfulJobsHistoryLimit }}
|
||||
successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
|
||||
{{- end }}
|
||||
{{- if .Values.failedJobsHistoryLimit }}
|
||||
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
|
||||
{{- end }}
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{- .Values.podAnnotations | toYaml | nindent 12 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 12 }}
|
||||
{{- if .Values.podLabels }}
|
||||
{{- .Values.podLabels | toYaml | nindent 12 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
|
||||
restartPolicy: "Never"
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
- "/bin/descheduler"
|
||||
args:
|
||||
- "--policy-config-file"
|
||||
- "/policy-dir/policy.yaml"
|
||||
{{- range $key, $value := .Values.cmdOptions }}
|
||||
- {{ printf "--%s" $key | quote }}
|
||||
{{- if $value }}
|
||||
- {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
livenessProbe:
|
||||
{{- toYaml .Values.livenessProbe | nindent 16 }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 16 }}
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
volumes:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
{{- end }}
|
||||
81
charts/descheduler/templates/deployment.yaml
Normal file
81
charts/descheduler/templates/deployment.yaml
Normal file
@@ -0,0 +1,81 @@
|
||||
{{- if eq .Values.kind "Deployment" }}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 8 }}
|
||||
{{- if .Values.podLabels }}
|
||||
{{- .Values.podLabels | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{- .Values.podAnnotations | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
- "/bin/descheduler"
|
||||
args:
|
||||
- "--policy-config-file"
|
||||
- "/policy-dir/policy.yaml"
|
||||
- "--descheduling-interval"
|
||||
- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
|
||||
{{- range $key, $value := .Values.cmdOptions }}
|
||||
- {{ printf "--%s" $key | quote }}
|
||||
{{- if $value }}
|
||||
- {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- containerPort: 10258
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
{{- toYaml .Values.livenessProbe | nindent 12 }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
volumes:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
38
charts/descheduler/templates/podsecuritypolicy.yaml
Normal file
38
charts/descheduler/templates/podsecuritypolicy.yaml
Normal file
@@ -0,0 +1,38 @@
|
||||
{{- if .Values.podSecurityPolicy.create -}}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
|
||||
spec:
|
||||
privileged: false
|
||||
allowPrivilegeEscalation: false
|
||||
requiredDropCapabilities:
|
||||
- ALL
|
||||
volumes:
|
||||
- 'configMap'
|
||||
- 'secret'
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: false
|
||||
runAsUser:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
- min: 1
|
||||
max: 65535
|
||||
seLinux:
|
||||
rule: 'RunAsAny'
|
||||
supplementalGroups:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
- min: 1
|
||||
max: 65535
|
||||
fsGroup:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
- min: 1
|
||||
max: 65535
|
||||
readOnlyRootFilesystem: true
|
||||
{{- end -}}
|
||||
11
charts/descheduler/templates/serviceaccount.yaml
Normal file
11
charts/descheduler/templates/serviceaccount.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "descheduler.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceAccount.annotations }}
|
||||
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
29
charts/descheduler/templates/tests/test-descheduler-pod.yaml
Normal file
29
charts/descheduler/templates/tests/test-descheduler-pod.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: descheduler-test-pod
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
spec:
|
||||
restartPolicy: Never
|
||||
serviceAccountName: descheduler-ci
|
||||
containers:
|
||||
- name: descheduler-test-container
|
||||
image: alpine:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- All
|
||||
privileged: false
|
||||
runAsNonRoot: false
|
||||
command: ["/bin/ash"]
|
||||
args:
|
||||
- -c
|
||||
- >-
|
||||
apk --no-cache add curl &&
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl &&
|
||||
chmod +x ./kubectl &&
|
||||
mv ./kubectl /usr/local/bin/kubectl &&
|
||||
/usr/local/bin/kubectl get pods --namespace kube-system --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | grep "descheduler" | grep "Completed"
|
||||
119
charts/descheduler/values.yaml
Normal file
119
charts/descheduler/values.yaml
Normal file
@@ -0,0 +1,119 @@
|
||||
# Default values for descheduler.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# CronJob or Deployment
|
||||
kind: CronJob
|
||||
|
||||
image:
|
||||
repository: k8s.gcr.io/descheduler/descheduler
|
||||
# Overrides the image tag whose default is the chart version
|
||||
tag: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
imagePullSecrets: []
|
||||
|
||||
resources:
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 256Mi
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
# labels that'll be applied to all resources
|
||||
commonLabels: {}
|
||||
|
||||
cronJobApiVersion: "batch/v1" # Use "batch/v1beta1" for k8s version < 1.21.0. TODO(@7i) remove with 1.23 release
|
||||
schedule: "*/2 * * * *"
|
||||
suspend: false
|
||||
#startingDeadlineSeconds: 200
|
||||
#successfulJobsHistoryLimit: 1
|
||||
#failedJobsHistoryLimit: 1
|
||||
|
||||
# Required when running as a Deployment
|
||||
deschedulingInterval: 5m
|
||||
|
||||
cmdOptions:
|
||||
v: 3
|
||||
# evict-local-storage-pods:
|
||||
# max-pods-to-evict-per-node: 10
|
||||
# node-selector: "key1=value1,key2=value2"
|
||||
|
||||
deschedulerPolicy:
|
||||
strategies:
|
||||
RemoveDuplicates:
|
||||
enabled: true
|
||||
RemovePodsViolatingNodeTaints:
|
||||
enabled: true
|
||||
RemovePodsViolatingNodeAffinity:
|
||||
enabled: true
|
||||
params:
|
||||
nodeAffinityType:
|
||||
- requiredDuringSchedulingIgnoredDuringExecution
|
||||
RemovePodsViolatingInterPodAntiAffinity:
|
||||
enabled: true
|
||||
LowNodeUtilization:
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
cpu: 20
|
||||
memory: 20
|
||||
pods: 20
|
||||
targetThresholds:
|
||||
cpu: 50
|
||||
memory: 50
|
||||
pods: 50
|
||||
|
||||
priorityClassName: system-cluster-critical
|
||||
|
||||
nodeSelector: {}
|
||||
# foo: bar
|
||||
|
||||
affinity: {}
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: kubernetes.io/e2e-az-name
|
||||
# operator: In
|
||||
# values:
|
||||
# - e2e-az1
|
||||
# - e2e-az2
|
||||
|
||||
tolerations: []
|
||||
# - key: 'management'
|
||||
# operator: 'Equal'
|
||||
# value: 'tool'
|
||||
# effect: 'NoSchedule'
|
||||
|
||||
rbac:
|
||||
# Specifies whether RBAC resources should be created
|
||||
create: true
|
||||
|
||||
podSecurityPolicy:
|
||||
# Specifies whether PodSecurityPolicy should be created.
|
||||
create: true
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a ServiceAccount should be created
|
||||
create: true
|
||||
# The name of the ServiceAccount to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name:
|
||||
# Specifies custom annotations for the serviceAccount
|
||||
annotations: {}
|
||||
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10258
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: 10
|
||||
|
||||
24
cloudbuild.yaml
Normal file
24
cloudbuild.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# See https://cloud.google.com/cloud-build/docs/build-config
|
||||
|
||||
# this must be specified in seconds. If omitted, defaults to 600s (10 mins)
|
||||
timeout: 1200s
|
||||
# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
|
||||
# or any new substitutions added in the future.
|
||||
options:
|
||||
substitution_option: ALLOW_LOOSE
|
||||
steps:
|
||||
- name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211118-2f2d816b90'
|
||||
entrypoint: make
|
||||
env:
|
||||
- DOCKER_CLI_EXPERIMENTAL=enabled
|
||||
- VERSION=$_GIT_TAG
|
||||
- BASE_REF=$_PULL_BASE_REF
|
||||
args:
|
||||
- push-all
|
||||
substitutions:
|
||||
# _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
|
||||
# can be used as a substitution
|
||||
_GIT_TAG: '12345'
|
||||
# _PULL_BASE_REF will contain the ref that was pushed to to trigger this build -
|
||||
# a branch like 'master' or 'release-0.2', or a tag like 'v0.2'.
|
||||
_PULL_BASE_REF: 'master'
|
||||
@@ -18,41 +18,63 @@ limitations under the License.
|
||||
package options
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
|
||||
// install the componentconfig api so we get its defaulting and conversion functions
|
||||
"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
|
||||
_ "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/install"
|
||||
"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
|
||||
deschedulerscheme "github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
apiserveroptions "k8s.io/apiserver/pkg/server/options"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
|
||||
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultDeschedulerPort = 10258
|
||||
)
|
||||
|
||||
// DeschedulerServer configuration
|
||||
type DeschedulerServer struct {
|
||||
componentconfig.DeschedulerConfiguration
|
||||
Client clientset.Interface
|
||||
|
||||
Client clientset.Interface
|
||||
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
|
||||
DisableMetrics bool
|
||||
}
|
||||
|
||||
// NewDeschedulerServer creates a new DeschedulerServer with default parameters
|
||||
func NewDeschedulerServer() *DeschedulerServer {
|
||||
versioned := v1alpha1.DeschedulerConfiguration{}
|
||||
deschedulerscheme.Scheme.Default(&versioned)
|
||||
cfg := componentconfig.DeschedulerConfiguration{}
|
||||
deschedulerscheme.Scheme.Convert(versioned, &cfg, nil)
|
||||
s := DeschedulerServer{
|
||||
DeschedulerConfiguration: cfg,
|
||||
func NewDeschedulerServer() (*DeschedulerServer, error) {
|
||||
cfg, err := newDefaultComponentConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &s
|
||||
|
||||
secureServing := apiserveroptions.NewSecureServingOptions().WithLoopback()
|
||||
secureServing.BindPort = DefaultDeschedulerPort
|
||||
|
||||
return &DeschedulerServer{
|
||||
DeschedulerConfiguration: *cfg,
|
||||
SecureServing: secureServing,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
|
||||
versionedCfg := v1alpha1.DeschedulerConfiguration{}
|
||||
deschedulerscheme.Scheme.Default(&versionedCfg)
|
||||
cfg := componentconfig.DeschedulerConfiguration{}
|
||||
if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
|
||||
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
|
||||
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "time interval between two consecutive descheduler executions")
|
||||
fs.StringVar(&rs.KubeconfigFile, "kubeconfig-file", rs.KubeconfigFile, "File with kube configuration.")
|
||||
fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
|
||||
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
|
||||
fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
|
||||
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
|
||||
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
|
||||
// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
|
||||
fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
|
||||
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
|
||||
|
||||
rs.SecureServing.AddFlags(fs)
|
||||
}
|
||||
|
||||
@@ -18,44 +18,86 @@ limitations under the License.
|
||||
package app
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"context"
|
||||
"io"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
|
||||
"github.com/kubernetes-incubator/descheduler/pkg/descheduler"
|
||||
"k8s.io/apiserver/pkg/server/healthz"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
aflag "k8s.io/apiserver/pkg/util/flag"
|
||||
"k8s.io/apiserver/pkg/util/logs"
|
||||
apiserver "k8s.io/apiserver/pkg/server"
|
||||
"k8s.io/apiserver/pkg/server/mux"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/component-base/config"
|
||||
_ "k8s.io/component-base/logs/json/register"
|
||||
"k8s.io/component-base/logs/registry"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// NewDeschedulerCommand creates a *cobra.Command object with default parameters
|
||||
func NewDeschedulerCommand(out io.Writer) *cobra.Command {
|
||||
s := options.NewDeschedulerServer()
|
||||
s, err := options.NewDeschedulerServer()
|
||||
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to initialize server")
|
||||
}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "descheduler",
|
||||
Short: "descheduler",
|
||||
Long: `The descheduler evicts pods which may be bound to less desired nodes`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
logs.InitLogs()
|
||||
defer logs.FlushLogs()
|
||||
err := Run(s)
|
||||
// s.Logs.Config.Format = s.Logging.Format
|
||||
|
||||
// LoopbackClientConfig is a config for a privileged loopback connection
|
||||
var LoopbackClientConfig *restclient.Config
|
||||
var SecureServing *apiserver.SecureServingInfo
|
||||
if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
|
||||
klog.ErrorS(err, "failed to apply secure server configuration")
|
||||
return
|
||||
}
|
||||
|
||||
factory, _ := registry.LogRegistry.Get(s.Logging.Format)
|
||||
if factory == nil {
|
||||
klog.ClearLogger()
|
||||
} else {
|
||||
log, logrFlush := factory.Create(config.FormatOptions{})
|
||||
defer logrFlush()
|
||||
klog.SetLogger(log)
|
||||
}
|
||||
|
||||
ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||
defer done()
|
||||
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
|
||||
if !s.DisableMetrics {
|
||||
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
|
||||
}
|
||||
|
||||
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
|
||||
|
||||
if _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done()); err != nil {
|
||||
klog.Fatalf("failed to start secure server: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
err := Run(ctx, s)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.ErrorS(err, "descheduler server")
|
||||
}
|
||||
},
|
||||
}
|
||||
cmd.SetOutput(out)
|
||||
|
||||
cmd.SetOut(out)
|
||||
flags := cmd.Flags()
|
||||
flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
|
||||
flags.AddGoFlagSet(flag.CommandLine)
|
||||
s.AddFlags(flags)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func Run(rs *options.DeschedulerServer) error {
|
||||
return descheduler.Run(rs)
|
||||
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
return descheduler.Run(ctx, rs)
|
||||
}
|
||||
|
||||
@@ -18,69 +18,19 @@ package app
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sigs.k8s.io/descheduler/pkg/version"
|
||||
)
|
||||
|
||||
var (
|
||||
// gitCommit is a constant representing the source version that
|
||||
// generated this build. It should be set during build via -ldflags.
|
||||
gitCommit string
|
||||
// version is a constant representing the version tag that
|
||||
// generated this build. It should be set during build via -ldflags.
|
||||
version string
|
||||
// buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
//It should be set during build via -ldflags.
|
||||
buildDate string
|
||||
)
|
||||
|
||||
// Info holds the information related to descheduler app version.
|
||||
type Info struct {
|
||||
Major string `json:"major"`
|
||||
Minor string `json:"minor"`
|
||||
GitCommit string `json:"gitCommit"`
|
||||
GitVersion string `json:"gitVersion"`
|
||||
BuildDate string `json:"buildDate"`
|
||||
GoVersion string `json:"goVersion"`
|
||||
Compiler string `json:"compiler"`
|
||||
Platform string `json:"platform"`
|
||||
}
|
||||
|
||||
// Get returns the overall codebase version. It's for detecting
|
||||
// what code a binary was built from.
|
||||
func Get() Info {
|
||||
majorVersion, minorVersion := splitVersion(version)
|
||||
return Info{
|
||||
Major: majorVersion,
|
||||
Minor: minorVersion,
|
||||
GitCommit: gitCommit,
|
||||
GitVersion: version,
|
||||
BuildDate: buildDate,
|
||||
GoVersion: runtime.Version(),
|
||||
Compiler: runtime.Compiler,
|
||||
Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
|
||||
}
|
||||
}
|
||||
|
||||
func NewVersionCommand() *cobra.Command {
|
||||
var versionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Version of descheduler",
|
||||
Long: `Prints the version of descheduler.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Printf("Descheduler version %+v\n", Get())
|
||||
fmt.Printf("Descheduler version %+v\n", version.Get())
|
||||
},
|
||||
}
|
||||
return versionCmd
|
||||
}
|
||||
|
||||
// splitVersion splits the git version to generate major and minor versions needed.
|
||||
func splitVersion(version string) (string, string) {
|
||||
if version == "" {
|
||||
return "", ""
|
||||
}
|
||||
// A sample version would be of form v0.1.0-7-ge884046, so split at first '.' and
|
||||
// then return 0 and 1+(+ appended to follow semver convention) for major and minor versions.
|
||||
return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
|
||||
}
|
||||
|
||||
@@ -17,17 +17,23 @@ limitations under the License.
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app"
|
||||
"os"
|
||||
|
||||
"k8s.io/component-base/cli"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app"
|
||||
)
|
||||
|
||||
func init() {
|
||||
klog.SetOutput(os.Stdout)
|
||||
klog.InitFlags(nil)
|
||||
}
|
||||
|
||||
func main() {
|
||||
out := os.Stdout
|
||||
cmd := app.NewDeschedulerCommand(out)
|
||||
cmd.AddCommand(app.NewVersionCommand())
|
||||
if err := cmd.Execute(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
code := cli.Run(cmd)
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
@@ -1,58 +1,3 @@
|
||||
## Kubernetes Community Code of Conduct
|
||||
# Kubernetes Community Code of Conduct
|
||||
|
||||
### Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of fostering
|
||||
an open and welcoming community, we pledge to respect all people who contribute
|
||||
through reporting issues, posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project a harassment-free experience for
|
||||
everyone, regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
|
||||
religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information, such as physical or electronic addresses,
|
||||
without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are not
|
||||
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
|
||||
commit themselves to fairly and consistently applying these principles to every aspect
|
||||
of managing this project. Project maintainers who do not follow or enforce the Code of
|
||||
Conduct may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
|
||||
|
||||
This Code of Conduct is adapted from the Contributor Covenant
|
||||
(http://contributor-covenant.org), version 1.2.0, available at
|
||||
http://contributor-covenant.org/version/1/2/0/
|
||||
|
||||
### Kubernetes Events Code of Conduct
|
||||
|
||||
Kubernetes events are working conferences intended for professional networking and collaboration in the
|
||||
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
|
||||
with their employer's policies on appropriate workplace behavior.
|
||||
|
||||
While at Kubernetes events or related social networking opportunities, attendees should not engage in
|
||||
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
|
||||
be especially aware of these concerns.
|
||||
|
||||
The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
|
||||
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
|
||||
be engaging in discriminatory or offensive speech or actions.
|
||||
|
||||
Please bring any concerns to to the immediate attention of Kubernetes event staff
|
||||
|
||||
|
||||
|
||||
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
|
||||
|
||||
docs/README.md (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
# Documentation Index
|
||||
|
||||
- [Contributor Guide](contributor-guide.md)
|
||||
- [Release Guide](release-guide.md)
|
||||
- [User Guide](user-guide.md)
|
||||
docs/contributor-guide.md (new file, 56 lines)
@@ -0,0 +1,56 @@
|
||||
# Contributor Guide
|
||||
|
||||
## Required Tools
|
||||
|
||||
- [Git](https://git-scm.com/downloads)
|
||||
- [Go 1.16+](https://golang.org/dl/)
|
||||
- [Docker](https://docs.docker.com/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
|
||||
- [kind v0.10.0+](https://kind.sigs.k8s.io/)
|
||||
|
||||
## Build and Run
|
||||
|
||||
Build descheduler.
|
||||
```sh
|
||||
cd $GOPATH/src/sigs.k8s.io
|
||||
git clone https://github.com/kubernetes-sigs/descheduler.git
|
||||
cd descheduler
|
||||
make
|
||||
```
|
||||
|
||||
Run descheduler.
|
||||
```sh
|
||||
./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
|
||||
```
|
||||
|
||||
View all CLI options.
|
||||
```
|
||||
./_output/bin/descheduler --help
|
||||
```
|
||||
|
||||
## Run Tests
|
||||
```
|
||||
GOOS=linux make dev-image
|
||||
kind create cluster --config hack/kind_config.yaml
|
||||
kind load docker-image <image name>
|
||||
kind get kubeconfig > /tmp/admin.conf
|
||||
export KUBECONFIG=/tmp/admin.conf
|
||||
make test-unit
|
||||
make test-e2e
|
||||
```
|
||||
|
||||
## Run Helm Tests
|
||||
Run the helm test for a particular descheduler release by setting the variables below:
|
||||
```
|
||||
HELM_IMAGE_REPO="descheduler"
|
||||
HELM_IMAGE_TAG="helm-test"
|
||||
HELM_CHART_LOCATION="./charts/descheduler"
|
||||
```
|
||||
The helm tests run as part of the descheduler CI. To run them manually from the descheduler root:
|
||||
|
||||
```
|
||||
make test-helm
|
||||
```
|
||||
|
||||
### Miscellaneous
|
||||
See the [hack directory](https://github.com/kubernetes-sigs/descheduler/tree/master/hack) for additional tools and scripts used for developing the descheduler.
|
||||
docs/release-guide.md (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
# Release Guide
|
||||
|
||||
## Container Image
|
||||
|
||||
### Semi-automatic
|
||||
|
||||
1. Make sure your repo is clean by git's standards
|
||||
2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
|
||||
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
|
||||
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
|
||||
5. Publish a draft release using the tag you just created
|
||||
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter)
|
||||
7. Publish release
|
||||
8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
|
||||
|
||||
### Manual
|
||||
|
||||
1. Make sure your repo is clean by git's standards
|
||||
2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
|
||||
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
|
||||
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
|
||||
5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
|
||||
6. Build and push the container image to the staging registry `VERSION=$VERSION make push-all`
|
||||
7. Publish a draft release using the tag you just created
|
||||
8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter)
|
||||
9. Publish release
|
||||
10. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
|
||||
|
||||
### Notes
|
||||
It's important to create the tag on the master branch after creating the release-* branch so that the [Helm releaser-action](#helm-chart) can work.
|
||||
It compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
|
||||
will be nothing to compare and it will fail (creating a new release branch usually involves no code changes). For this same reason, you should
|
||||
also tag patch releases (on the release-* branch) *after* pushing changes (if those changes involve bumping the Helm chart version).
|
||||
|
||||
See [post-descheduler-push-images dashboard](https://testgrid.k8s.io/sig-scheduling#post-descheduler-push-images) for staging registry image build job status.
|
||||
|
||||
View the descheduler staging registry using [this URL](https://console.cloud.google.com/gcr/images/k8s-staging-descheduler/GLOBAL/descheduler) in a web browser
|
||||
or use the `gcloud` commands below.
|
||||
|
||||
List images in staging registry.
|
||||
```
|
||||
gcloud container images list --repository gcr.io/k8s-staging-descheduler
|
||||
```
|
||||
|
||||
List descheduler image tags in the staging registry.
|
||||
```
|
||||
gcloud container images list-tags gcr.io/k8s-staging-descheduler/descheduler
|
||||
```
|
||||
|
||||
Get SHA256 hash for a specific image in the staging registry.
|
||||
```
|
||||
gcloud container images describe gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
|
||||
```
|
||||
|
||||
Pull image from the staging registry.
|
||||
```
|
||||
docker pull gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
|
||||
```
|
||||
|
||||
## Helm Chart
|
||||
Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. Example git tag name is `descheduler-helm-chart-0.18.0`.
|
||||
Released versions of the helm charts are stored in the `gh-pages` branch of this repo. The [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action)
|
||||
is set up to build and push the helm charts to the `gh-pages` branch when changes are pushed to a `release-*` branch.
|
||||
|
||||
The major and minor versions of the chart match the descheduler major and minor versions. For example, descheduler helm chart version `descheduler-helm-chart-0.18.0` corresponds
to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match. The patch
version of the descheduler helm chart is used to version changes specific to the helm chart.
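For illustration, a hedged sketch of the relevant `charts/descheduler/Chart.yaml` fields for such a release (the values are examples only, not taken from the actual chart):

```
# Illustrative excerpt; the real Chart.yaml contains additional fields.
apiVersion: v2
name: descheduler
version: 0.18.1       # chart version; its patch number may move independently
appVersion: "0.18.0"  # descheduler version, without the "v" prefix
```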
|
||||
|
||||
1. Merge all helm chart changes into the master branch before the release is tagged/cut
2. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix)
3. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
4. Make sure your repo is clean by git's standards
5. Follow the release-branch or patch-release tagging pattern from the section above.
6. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
|
||||
docs/user-guide.md (new file, 142 lines)
@@ -0,0 +1,142 @@
|
||||
# User Guide
|
||||
|
||||
Starting with descheduler release v0.10.0, container images are available in the official k8s container registry.
|
||||
|
||||
Descheduler Version | Container Image | Architectures |
|
||||
------------------- |-----------------------------------------------------|-------------------------|
|
||||
v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
|
||||
v0.19.0 | k8s.gcr.io/descheduler/descheduler:v0.19.0 | AMD64 |
|
||||
v0.18.0 | k8s.gcr.io/descheduler/descheduler:v0.18.0 | AMD64 |
|
||||
v0.10.0 | k8s.gcr.io/descheduler/descheduler:v0.10.0 | AMD64 |
|
||||
|
||||
Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore,
starting with descheduler release v0.20.0, use the process below to load the official descheduler image into a kind cluster.
|
||||
```
|
||||
kind create cluster
|
||||
docker pull k8s.gcr.io/descheduler/descheduler:v0.20.0
|
||||
kind load docker-image k8s.gcr.io/descheduler/descheduler:v0.20.0
|
||||
```
|
||||
|
||||
## Policy Configuration Examples
|
||||
The [examples](https://github.com/kubernetes-sigs/descheduler/tree/master/examples) directory has descheduler policy configuration examples.
|
||||
|
||||
## CLI Options
|
||||
The descheduler has many CLI options that can be used to override its default behavior.
|
||||
```
|
||||
descheduler --help
|
||||
The descheduler evicts pods which may be bound to less desired nodes
|
||||
|
||||
Usage:
|
||||
descheduler [flags]
|
||||
descheduler [command]
|
||||
|
||||
Available Commands:
|
||||
help Help about any command
|
||||
version Version of descheduler
|
||||
|
||||
Flags:
|
||||
--add-dir-header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
|
||||
--dry-run execute descheduler in dry run mode.
|
||||
--evict-local-storage-pods DEPRECATED: enables evicting pods using local storage by descheduler
|
||||
-h, --help help for descheduler
|
||||
--kubeconfig string File with kube configuration.
|
||||
--log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log-dir string If non-empty, write log files in this directory
|
||||
--log-file string If non-empty, use this log file
|
||||
--log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
|
||||
--logtostderr log to standard error instead of files (default true)
|
||||
--max-pods-to-evict-per-node int DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler
|
||||
--node-selector string DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
|
||||
--policy-config-file string File with descheduler policy configuration.
|
||||
--skip-headers If true, avoid header prefixes in the log messages
|
||||
--skip-log-headers If true, avoid headers when opening log files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level number for the log level verbosity
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
|
||||
Use "descheduler [command] --help" for more information about a command.
|
||||
```
|
||||
|
||||
## Production Use Cases
|
||||
This section contains descriptions of real-world production use cases.
|
||||
|
||||
### Balance Cluster By Pod Age
|
||||
When initially migrating applications from a static virtual machine infrastructure to a cloud native k8s
|
||||
infrastructure there can be a tendency to treat application pods like static virtual machines. One approach
|
||||
to help prevent developers and operators from treating pods like virtual machines is to ensure that pods
|
||||
only run for a fixed amount
|
||||
of time.
|
||||
|
||||
The `PodLifeTime` strategy can be used to ensure that old pods are evicted. It is recommended to create a
|
||||
[pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for each
|
||||
application to ensure application availability.
|
||||
```
|
||||
descheduler -v=3 --evict-local-storage-pods --policy-config-file=pod-life-time.yml
|
||||
```
|
||||
|
||||
This policy configuration file ensures that pods created more than 7 days ago are evicted.
|
||||
```
|
||||
---
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
|
||||
```
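For the pod disruption budget recommended above, a minimal sketch might look like the following; the application name `my-app` and the `minAvailable` value are placeholders to adapt to your workload.
```
apiVersion: policy/v1          # use policy/v1beta1 on clusters older than v1.21
kind: PodDisruptionBudget
metadata:
  name: my-app-pdb             # hypothetical name
spec:
  minAvailable: 1              # keep at least one replica available during evictions
  selector:
    matchLabels:
      app: my-app              # hypothetical label selector
```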
|
||||
|
||||
### Balance Cluster By Node Memory Utilization
|
||||
If your cluster has been running for a long period of time, you may find that the resource utilization is not very
|
||||
balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory`
|
||||
or `number of pods`.
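For example, a hedged `LowNodeUtilization` sketch that sets thresholds on all three resources at once (the numbers are illustrative only):
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:        # nodes below these values are considered underutilized
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:  # nodes above these values are candidates for eviction
          "cpu": 50
          "memory": 50
          "pods": 50
```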
|
||||
|
||||
#### Balance high utilization nodes
|
||||
Using `LowNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
|
||||
from nodes with memory utilization over 70% to nodes with memory utilization below 20%.
|
||||
|
||||
```
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"LowNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"memory": 20
|
||||
targetThresholds:
|
||||
"memory": 70
|
||||
```
|
||||
|
||||
#### Balance low utilization nodes
|
||||
Using `HighNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
|
||||
from nodes with memory utilization lower than 20%. This should be used along with scheduler strategy `MostRequestedPriority`.
|
||||
The evicted pods will be compacted into a minimal set of nodes.
|
||||
|
||||
```
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"HighNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"memory": 20
|
||||
```
|
||||
|
||||
### Autoheal Node Problems
|
||||
Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
|
||||
[Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
|
||||
[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove
|
||||
Nodes which have problems. Node Problem Detector can detect specific Node problems and taint any Nodes which have those
|
||||
problems. The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
|
||||
allocation falls below the Cluster Autoscaler's scale down threshold, the Node will become a scale down candidate
|
||||
and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems.
|
||||
examples/failed-pods.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"RemoveFailedPods":
|
||||
enabled: true
|
||||
params:
|
||||
failedPods:
|
||||
reasons:
|
||||
- "OutOfcpu"
|
||||
- "CreateContainerConfigError"
|
||||
includingInitContainers: true
|
||||
excludeOwnerKinds:
|
||||
- "Job"
|
||||
minPodLifetimeSeconds: 3600 # 1 hour
|
||||
examples/high-node-utilization.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"HighNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"memory": 20
|
||||
examples/low-node-utilization.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"LowNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"memory": 20
|
||||
targetThresholds:
|
||||
"memory": 70
|
||||
examples/node-affinity.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"RemovePodsViolatingNodeAffinity":
|
||||
enabled: true
|
||||
params:
|
||||
nodeAffinityType:
|
||||
- "requiredDuringSchedulingIgnoredDuringExecution"
|
||||
examples/pod-life-time.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 604800 # 7 days
|
||||
@@ -17,3 +17,13 @@ strategies:
|
||||
"cpu" : 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
"RemovePodsHavingTooManyRestarts":
|
||||
enabled: true
|
||||
params:
|
||||
podsHavingTooManyRestarts:
|
||||
podRestartThreshold: 100
|
||||
includingInitContainers: true
|
||||
"RemovePodsViolatingTopologySpreadConstraint":
|
||||
enabled: true
|
||||
params:
|
||||
includeSoftConstraints: true
|
||||
|
||||
examples/topology-spread-constraint.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"RemovePodsViolatingTopologySpreadConstraint":
|
||||
enabled: true
|
||||
params:
|
||||
includeSoftConstraints: true # Include 'ScheduleAnyways' constraints
|
||||
glide.lock (generated, deleted, 328 lines)
@@ -1,328 +0,0 @@
|
||||
hash: 6ccf8e8213eb31f9dd31b46c3aa3c2c01929c6230fb049cfabcabd498ade9c30
|
||||
updated: 2017-10-16T14:31:20.353977552-04:00
|
||||
imports:
|
||||
- name: github.com/davecgh/go-spew
|
||||
version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
|
||||
subpackages:
|
||||
- spew
|
||||
- name: github.com/docker/distribution
|
||||
version: cd27f179f2c10c5d300e6d09025b538c475b0d51
|
||||
subpackages:
|
||||
- digest
|
||||
- reference
|
||||
- name: github.com/emicklei/go-restful
|
||||
version: ff4f55a206334ef123e4f79bbf348980da81ca46
|
||||
subpackages:
|
||||
- log
|
||||
- name: github.com/emicklei/go-restful-swagger12
|
||||
version: dcef7f55730566d41eae5db10e7d6981829720f6
|
||||
- name: github.com/ghodss/yaml
|
||||
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
|
||||
- name: github.com/go-openapi/analysis
|
||||
version: b44dc874b601d9e4e2f6e19140e794ba24bead3b
|
||||
- name: github.com/go-openapi/jsonpointer
|
||||
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
|
||||
- name: github.com/go-openapi/jsonreference
|
||||
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
|
||||
- name: github.com/go-openapi/loads
|
||||
version: 18441dfa706d924a39a030ee2c3b1d8d81917b38
|
||||
- name: github.com/go-openapi/spec
|
||||
version: 6aced65f8501fe1217321abf0749d354824ba2ff
|
||||
- name: github.com/go-openapi/swag
|
||||
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
|
||||
- name: github.com/gogo/protobuf
|
||||
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
|
||||
subpackages:
|
||||
- proto
|
||||
- sortkeys
|
||||
- name: github.com/golang/glog
|
||||
version: 44145f04b68cf362d9c4df2182967c2275eaefed
|
||||
- name: github.com/google/gofuzz
|
||||
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
|
||||
- name: github.com/hashicorp/golang-lru
|
||||
version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
|
||||
subpackages:
|
||||
- simplelru
|
||||
- name: github.com/howeyc/gopass
|
||||
version: bf9dde6d0d2c004a008c27aaee91170c786f6db8
|
||||
- name: github.com/imdario/mergo
|
||||
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
|
||||
- name: github.com/inconshreveable/mousetrap
|
||||
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
||||
- name: github.com/juju/ratelimit
|
||||
version: 5b9ff866471762aa2ab2dced63c9fb6f53921342
|
||||
- name: github.com/kubernetes/repo-infra
|
||||
version: f521b5d472e00e05da5394994942064510a6e8bf
|
||||
- name: github.com/mailru/easyjson
|
||||
version: d5b7844b561a7bc640052f1b935f7b800330d7e0
|
||||
subpackages:
|
||||
- buffer
|
||||
- jlexer
|
||||
- jwriter
|
||||
- name: github.com/PuerkitoBio/purell
|
||||
version: 8a290539e2e8629dbc4e6bad948158f790ec31f4
|
||||
- name: github.com/PuerkitoBio/urlesc
|
||||
version: 5bd2802263f21d8788851d5305584c82a5c75d7e
|
||||
- name: github.com/spf13/cobra
|
||||
version: f62e98d28ab7ad31d707ba837a966378465c7b57
|
||||
- name: github.com/spf13/pflag
|
||||
version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
|
||||
- name: github.com/ugorji/go
|
||||
version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
|
||||
subpackages:
|
||||
- codec
|
||||
- name: golang.org/x/crypto
|
||||
version: d172538b2cfce0c13cee31e647d0367aa8cd2486
|
||||
subpackages:
|
||||
- bcrypt
|
||||
- blowfish
|
||||
- nacl/secretbox
|
||||
- poly1305
|
||||
- salsa20/salsa
|
||||
- ssh/terminal
|
||||
- name: golang.org/x/net
|
||||
version: f2499483f923065a842d38eb4c7f1927e6fc6e6d
|
||||
subpackages:
|
||||
- context
|
||||
- html
|
||||
- html/atom
|
||||
- http2
|
||||
- http2/hpack
|
||||
- idna
|
||||
- internal/timeseries
|
||||
- lex/httplex
|
||||
- trace
|
||||
- websocket
|
||||
- name: golang.org/x/sys
|
||||
version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
|
||||
subpackages:
|
||||
- unix
|
||||
- name: golang.org/x/text
|
||||
version: 2910a502d2bf9e43193af9d68ca516529614eed3
|
||||
subpackages:
|
||||
- cases
|
||||
- internal/tag
|
||||
- language
|
||||
- runes
|
||||
- secure/bidirule
|
||||
- secure/precis
|
||||
- transform
|
||||
- unicode/bidi
|
||||
- unicode/norm
|
||||
- width
|
||||
- name: gopkg.in/inf.v0
|
||||
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
||||
- name: gopkg.in/yaml.v2
|
||||
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
|
||||
- name: k8s.io/apimachinery
|
||||
version: 917740426ad66ff818da4809990480bcc0786a77
|
||||
subpackages:
|
||||
- pkg/api/equality
|
||||
- pkg/api/errors
|
||||
- pkg/api/meta
|
||||
- pkg/api/resource
|
||||
- pkg/apimachinery
|
||||
- pkg/apimachinery/announced
|
||||
- pkg/apimachinery/registered
|
||||
- pkg/apis/meta/v1
|
||||
- pkg/apis/meta/v1/unstructured
|
||||
- pkg/apis/meta/v1alpha1
|
||||
- pkg/conversion
|
||||
- pkg/conversion/queryparams
|
||||
- pkg/conversion/unstructured
|
||||
- pkg/fields
|
||||
- pkg/labels
|
||||
- pkg/openapi
|
||||
- pkg/runtime
|
||||
- pkg/runtime/schema
|
||||
- pkg/runtime/serializer
|
||||
- pkg/runtime/serializer/json
|
||||
- pkg/runtime/serializer/protobuf
|
||||
- pkg/runtime/serializer/recognizer
|
||||
- pkg/runtime/serializer/streaming
|
||||
- pkg/runtime/serializer/versioning
|
||||
- pkg/selection
|
||||
- pkg/types
|
||||
- pkg/util/cache
|
||||
- pkg/util/clock
|
||||
- pkg/util/diff
|
||||
- pkg/util/errors
|
||||
- pkg/util/framer
|
||||
- pkg/util/intstr
|
||||
- pkg/util/json
|
||||
- pkg/util/net
|
||||
- pkg/util/rand
|
||||
- pkg/util/runtime
|
||||
- pkg/util/sets
|
||||
- pkg/util/validation
|
||||
- pkg/util/validation/field
|
||||
- pkg/util/wait
|
||||
- pkg/util/yaml
|
||||
- pkg/version
|
||||
- pkg/watch
|
||||
- third_party/forked/golang/reflect
|
||||
- name: k8s.io/apiserver
|
||||
version: a7f02eb8e3920e446965036c9610ec52a7ede92f
|
||||
subpackages:
|
||||
- pkg/util/flag
|
||||
- pkg/util/logs
|
||||
- name: k8s.io/client-go
|
||||
version: ec52d278b25c8fef82a965d93afdc74771ea6963
|
||||
subpackages:
|
||||
- discovery
|
||||
- discovery/fake
|
||||
- kubernetes/scheme
|
||||
- pkg/api
|
||||
- pkg/api/v1
|
||||
- pkg/apis/admissionregistration
|
||||
- pkg/apis/admissionregistration/v1alpha1
|
||||
- pkg/apis/apps
|
||||
- pkg/apis/apps/v1beta1
|
||||
- pkg/apis/authentication
|
||||
- pkg/apis/authentication/v1
|
||||
- pkg/apis/authentication/v1beta1
|
||||
- pkg/apis/authorization
|
||||
- pkg/apis/authorization/v1
|
||||
- pkg/apis/authorization/v1beta1
|
||||
- pkg/apis/autoscaling
|
||||
- pkg/apis/autoscaling/v1
|
||||
- pkg/apis/autoscaling/v2alpha1
|
||||
- pkg/apis/batch
|
||||
- pkg/apis/batch/v1
|
||||
- pkg/apis/batch/v2alpha1
|
||||
- pkg/apis/certificates
|
||||
- pkg/apis/certificates/v1beta1
|
||||
- pkg/apis/extensions
|
||||
- pkg/apis/extensions/v1beta1
|
||||
- pkg/apis/networking
|
||||
- pkg/apis/networking/v1
|
||||
- pkg/apis/policy
|
||||
- pkg/apis/policy/v1beta1
|
||||
- pkg/apis/rbac
|
||||
- pkg/apis/rbac/v1alpha1
|
||||
- pkg/apis/rbac/v1beta1
|
||||
- pkg/apis/settings
|
||||
- pkg/apis/settings/v1alpha1
|
||||
- pkg/apis/storage
|
||||
- pkg/apis/storage/v1
|
||||
- pkg/apis/storage/v1beta1
|
||||
- pkg/util
|
||||
- pkg/util/parsers
|
||||
- pkg/version
|
||||
- rest
|
||||
- rest/watch
|
||||
- testing
|
||||
- tools/auth
|
||||
- tools/cache
|
||||
- tools/clientcmd
|
||||
- tools/clientcmd/api
|
||||
- tools/clientcmd/api/latest
|
||||
- tools/clientcmd/api/v1
|
||||
- tools/metrics
|
||||
- transport
|
||||
- util/cert
|
||||
- util/flowcontrol
|
||||
- util/homedir
|
||||
- util/integer
|
||||
- name: k8s.io/gengo
|
||||
version: c79c13d131b0a8f42d05faa6491c12e94ccc6f30
|
||||
- name: k8s.io/kubernetes
|
||||
version: 4bc5e7f9a6c25dc4c03d4d656f2cefd21540e28c
|
||||
subpackages:
|
||||
- pkg/api
|
||||
- pkg/api/install
|
||||
- pkg/api/v1
|
||||
- pkg/api/v1/helper/qos
|
||||
- pkg/api/v1/ref
|
||||
- pkg/api/v1/resource
|
||||
- pkg/apis/admissionregistration
|
||||
- pkg/apis/admissionregistration/v1alpha1
|
||||
- pkg/apis/apps
|
||||
- pkg/apis/apps/install
|
||||
- pkg/apis/apps/v1beta1
|
||||
- pkg/apis/authentication
|
||||
- pkg/apis/authentication/install
|
||||
- pkg/apis/authentication/v1
|
||||
- pkg/apis/authentication/v1beta1
|
||||
- pkg/apis/authorization
|
||||
- pkg/apis/authorization/install
|
||||
- pkg/apis/authorization/v1
|
||||
- pkg/apis/authorization/v1beta1
|
||||
- pkg/apis/autoscaling
|
||||
- pkg/apis/autoscaling/install
|
||||
- pkg/apis/autoscaling/v1
|
||||
- pkg/apis/autoscaling/v2alpha1
|
||||
- pkg/apis/batch
|
||||
- pkg/apis/batch/install
|
||||
- pkg/apis/batch/v1
|
||||
- pkg/apis/batch/v2alpha1
|
||||
- pkg/apis/certificates
|
||||
- pkg/apis/certificates/install
|
||||
- pkg/apis/certificates/v1beta1
|
||||
- pkg/apis/extensions
|
||||
- pkg/apis/extensions/install
|
||||
- pkg/apis/extensions/v1beta1
|
||||
- pkg/apis/networking
|
||||
- pkg/apis/networking/v1
|
||||
- pkg/apis/policy
|
||||
- pkg/apis/policy/install
|
||||
- pkg/apis/policy/v1beta1
|
||||
- pkg/apis/rbac
|
||||
- pkg/apis/rbac/install
|
||||
- pkg/apis/rbac/v1alpha1
|
||||
- pkg/apis/rbac/v1beta1
|
||||
- pkg/apis/settings
|
||||
- pkg/apis/settings/install
|
||||
- pkg/apis/settings/v1alpha1
|
||||
- pkg/apis/storage
|
||||
- pkg/apis/storage/install
|
||||
- pkg/apis/storage/v1
|
||||
- pkg/apis/storage/v1beta1
|
||||
- pkg/client/clientset_generated/clientset
|
||||
- pkg/client/clientset_generated/clientset/fake
|
||||
- pkg/client/clientset_generated/clientset/scheme
|
||||
- pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1
|
||||
- pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/apps/v1beta1
|
||||
- pkg/client/clientset_generated/clientset/typed/apps/v1beta1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/authentication/v1
|
||||
- pkg/client/clientset_generated/clientset/typed/authentication/v1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/authentication/v1beta1
|
||||
- pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/authorization/v1
|
||||
- pkg/client/clientset_generated/clientset/typed/authorization/v1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/authorization/v1beta1
|
||||
- pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/autoscaling/v1
|
||||
- pkg/client/clientset_generated/clientset/typed/autoscaling/v1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1
|
||||
- pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/batch/v1
|
||||
- pkg/client/clientset_generated/clientset/typed/batch/v1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/batch/v2alpha1
|
||||
- pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/certificates/v1beta1
|
||||
- pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/core/v1
|
||||
- pkg/client/clientset_generated/clientset/typed/core/v1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/extensions/v1beta1
|
||||
- pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/networking/v1
|
||||
- pkg/client/clientset_generated/clientset/typed/networking/v1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/policy/v1beta1
|
||||
- pkg/client/clientset_generated/clientset/typed/policy/v1beta1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1
|
||||
- pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/rbac/v1beta1
|
||||
- pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/settings/v1alpha1
|
||||
- pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/storage/v1
|
||||
- pkg/client/clientset_generated/clientset/typed/storage/v1/fake
|
||||
- pkg/client/clientset_generated/clientset/typed/storage/v1beta1
|
||||
- pkg/client/clientset_generated/clientset/typed/storage/v1beta1/fake
|
||||
- pkg/client/listers/core/v1
|
||||
- pkg/kubelet/types
|
||||
- pkg/util
|
||||
- pkg/util/parsers
|
||||
testImports: []
|
||||
glide.yaml (deleted, 15 lines)
@@ -1,15 +0,0 @@
|
||||
package: github.com/kubernetes-incubator/descheduler
|
||||
import:
|
||||
- package: k8s.io/client-go
|
||||
version: ec52d278b25c8fef82a965d93afdc74771ea6963
|
||||
- package: k8s.io/apiserver
|
||||
version: release-1.7
|
||||
- package: k8s.io/apimachinery
|
||||
version: 917740426ad66ff818da4809990480bcc0786a77
|
||||
- package: k8s.io/kubernetes
|
||||
version: 1.7.6
|
||||
- package: github.com/kubernetes/repo-infra
|
||||
- package: github.com/spf13/cobra
|
||||
version: f62e98d28ab7ad31d707ba837a966378465c7b57
|
||||
- package: k8s.io/gengo
|
||||
version: c79c13d131b0a8f42d05faa6491c12e94ccc6f30
|
||||
go.mod (new file, 117 lines)
@@ -0,0 +1,117 @@
|
||||
module sigs.k8s.io/descheduler
|
||||
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/client9/misspell v0.3.4
|
||||
github.com/spf13/cobra v1.2.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
k8s.io/api v0.23.0
|
||||
k8s.io/apimachinery v0.23.0
|
||||
k8s.io/apiserver v0.23.0
|
||||
k8s.io/client-go v0.23.0
|
||||
k8s.io/code-generator v0.23.0
|
||||
k8s.io/component-base v0.23.0
|
||||
k8s.io/component-helpers v0.23.0
|
||||
k8s.io/klog/v2 v2.30.0
|
||||
k8s.io/kubectl v0.20.5
|
||||
sigs.k8s.io/mdtoc v1.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.81.0 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/coreos/go-semver v0.3.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/felixge/httpsnoop v1.0.1 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
|
||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/go-logr/logr v1.2.0 // indirect
|
||||
github.com/go-logr/zapr v1.2.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.5 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
|
||||
github.com/google/go-cmp v0.5.5 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/uuid v1.1.2 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
|
||||
github.com/imdario/mergo v0.3.5 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.11.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.28.0 // indirect
|
||||
github.com/prometheus/procfs v0.6.0 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.0 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.0 // indirect
|
||||
go.opentelemetry.io/contrib v0.20.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
|
||||
go.opentelemetry.io/otel v0.20.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.20.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v0.20.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v0.20.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.19.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
|
||||
golang.org/x/mod v0.4.2 // indirect
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
|
||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e // indirect
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
|
||||
google.golang.org/grpc v1.40.0 // indirect
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
|
||||
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
|
||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
||||
)
|
||||
hack/.spelling_failures (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
BUILD
|
||||
CHANGELOG
|
||||
OWNERS
|
||||
go.mod
|
||||
go.sum
|
||||
vendor/
|
||||
hack/e2e-gce/gcloud_create_cluster.sh (new executable file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
echo "Make sure that uuid package is installed"
|
||||
|
||||
master_uuid=$(uuid)
|
||||
node1_uuid=$(uuid)
|
||||
node2_uuid=$(uuid)
|
||||
kube_apiserver_port=6443
|
||||
kube_version=1.13.1
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/../../
|
||||
E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
|
||||
|
||||
|
||||
create_cluster() {
|
||||
echo "#################### Creating instances ##########################"
|
||||
gcloud compute instances create descheduler-$master_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
# Keeping the --zone here so as to make sure that e2e's can run locally.
|
||||
echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
gcloud compute instances create descheduler-$node1_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
gcloud compute instances create descheduler-$node2_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-c --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
# Delete the firewall port created for master.
|
||||
echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
chmod 755 $E2E_GCE_HOME/delete_cluster.sh
|
||||
}
|
||||
|
||||
|
||||
generate_kubeadm_instance_files() {
|
||||
# TODO: Check if they have come up. awk $6 contains the state(RUNNING or not).
|
||||
master_private_ip=$(gcloud compute instances list | grep $master_uuid|awk '{print $4}')
|
||||
node1_public_ip=$(gcloud compute instances list | grep $node1_uuid|awk '{print $5}')
|
||||
node2_public_ip=$(gcloud compute instances list | grep $node2_uuid|awk '{print $5}')
|
||||
echo "kubeadm init --kubernetes-version=${kube_version} --apiserver-advertise-address=${master_private_ip}" --ignore-preflight-errors=all --pod-network-cidr=10.96.0.0/12 > $E2E_GCE_HOME/kubeadm_install.sh
|
||||
}
|
||||
|
||||
|
||||
transfer_install_files() {
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
|
||||
}
|
||||
|
||||
|
||||
install_kube() {
|
||||
# Docker installation.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-c
|
||||
# kubeadm installation.
|
||||
# 1. Transfer files to master, nodes.
|
||||
transfer_install_files
|
||||
# 2. Install kubeadm.
|
||||
#TODO: Add rm /tmp/kubeadm_install.sh
|
||||
# Open port for kube API server
|
||||
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
|
||||
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b|grep 'kubeadm join')
|
||||
|
||||
# Copy the kubeconfig file onto /tmp for e2e tests.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b
|
||||
gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b
|
||||
|
||||
# Postinstall on master, need to add a network plugin for kube-dns to come to running state.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b
|
||||
echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh
|
||||
|
||||
# Copy kubeadm_join to every node.
|
||||
#TODO: Put these in a loop, so that extension becomes possible.
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
|
||||
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-c
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-c
|
||||
}
|
||||
|
||||
|
||||
create_cluster
|
||||
|
||||
generate_kubeadm_instance_files
|
||||
|
||||
install_kube
|
||||
hack/e2e-gce/gcloud_sdk_configure.sh (new executable file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
|
||||
gcloud auth activate-service-account --key-file "${GCE_SA_CREDS}"
|
||||
gcloud config set project $GCE_PROJECT_ID
|
||||
gcloud config set compute/zone $GCE_ZONE
|
||||
hack/e2e-gce/install_gcloud.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
wget https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-176.0.0-linux-x86_64.tar.gz
|
||||
|
||||
tar -xvzf google-cloud-sdk-176.0.0-linux-x86_64.tar.gz
|
||||
|
||||
./google-cloud-sdk/install.sh -q
|
||||
hack/e2e-gce/kubeadm_preinstall.sh (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
apt-get update
|
||||
apt-get install -y docker.io
|
||||
|
||||
apt-get update && apt-get install -y apt-transport-https
|
||||
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
|
||||
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
|
||||
deb http://apt.kubernetes.io/ kubernetes-xenial main
|
||||
EOF
|
||||
apt-get update
|
||||
apt-get install -y kubelet kubeadm kubectl
|
||||
exit 0
|
||||
hack/kind_config.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
- role: worker
|
||||
kubeadmConfigPatches:
|
||||
- |
|
||||
kind: JoinConfiguration
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "topology.kubernetes.io/zone=local-a"
|
||||
- role: worker
|
||||
kubeadmConfigPatches:
|
||||
- |
|
||||
kind: JoinConfiguration
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "topology.kubernetes.io/zone=local-b"
|
||||
@@ -43,5 +43,5 @@ OS_ROOT="$( os::util::absolute_path "${init_source}" )"
|
||||
export OS_ROOT
|
||||
cd "${OS_ROOT}"
|
||||
|
||||
PRJ_PREFIX="github.com/kubernetes-incubator/descheduler"
|
||||
PRJ_PREFIX="sigs.k8s.io/descheduler"
|
||||
OS_OUTPUT_BINPATH="${OS_ROOT}/_output/bin"
|
||||
|
||||
hack/tools.go (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
//go:build tools
|
||||
// +build tools
|
||||
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This package imports things required by build scripts, to force `go mod` to see them as dependencies
|
||||
package tools
|
||||
|
||||
import (
|
||||
_ "github.com/client9/misspell/cmd/misspell"
|
||||
_ "k8s.io/code-generator"
|
||||
_ "sigs.k8s.io/mdtoc"
|
||||
)
|
||||
@@ -1,151 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
# The sort at the end makes sure we feed the topological sort a deterministic
|
||||
# list (since there aren't many dependencies).
|
||||
|
||||
generated_files=($(
|
||||
find . -not \( \
|
||||
\( \
|
||||
-wholename './output' \
|
||||
-o -wholename './_output' \
|
||||
-o -wholename './staging' \
|
||||
-o -wholename './release' \
|
||||
-o -wholename './target' \
|
||||
-o -wholename '*/third_party/*' \
|
||||
-o -wholename '*/vendor/*' \
|
||||
-o -wholename '*/codecgen-*-1234.generated.go' \
|
||||
\) -prune \
|
||||
\) -name '*.generated.go' | LC_ALL=C sort -r
|
||||
))
|
||||
|
||||
# We only work for deps within this prefix.
|
||||
#my_prefix="k8s.io/kubernetes"
|
||||
my_prefix="github.com/kubernetes-incubator/descheduler"
|
||||
|
||||
# Register function to be called on EXIT to remove codecgen
|
||||
# binary and also to touch the files that should be regenerated
|
||||
# since they are first removed.
|
||||
# This is necessary to make the script work after previous failure.
|
||||
function cleanup {
|
||||
rm -f "${CODECGEN:-}"
|
||||
pushd "${KUBE_ROOT}" > /dev/null
|
||||
for (( i=0; i < number; i++ )); do
|
||||
touch "${generated_files[${i}]}" || true
|
||||
done
|
||||
popd > /dev/null
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
# Precompute dependencies for all directories.
|
||||
# Then sort all files in the dependency order.
|
||||
number=${#generated_files[@]}
|
||||
result=""
|
||||
for (( i=0; i<number; i++ )); do
|
||||
visited[${i}]=false
|
||||
file="${generated_files[${i}]/\.generated\.go/.go}"
|
||||
deps[${i}]=$(go list -f '{{range .Deps}}{{.}}{{"\n"}}{{end}}' ${file} | grep "^${my_prefix}")
|
||||
done
|
||||
###echo "DBG: found $number generated files"
|
||||
###for f in $(echo "${generated_files[@]}" | LC_ALL=C sort); do
|
||||
### echo "DBG: $f"
|
||||
###done
|
||||
|
||||
# NOTE: depends function assumes that the whole repository is under
|
||||
# $my_prefix - it will NOT work if that is not true.
|
||||
function depends {
|
||||
rhs="$(dirname ${generated_files[$2]/#./${my_prefix}})"
|
||||
###echo "DBG: does ${file} depend on ${rhs}?"
|
||||
for dep in ${deps[$1]}; do
|
||||
###echo "DBG: checking against $dep"
|
||||
if [[ "${dep}" == "${rhs}" ]]; then
|
||||
###echo "DBG: = yes"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
###echo "DBG: = no"
|
||||
return 1
|
||||
}
|
||||
|
||||
function tsort {
|
||||
visited[$1]=true
|
||||
local j=0
|
||||
for (( j=0; j<number; j++ )); do
|
||||
if ! ${visited[${j}]}; then
|
||||
if depends "$1" ${j}; then
|
||||
tsort $j
|
||||
fi
|
||||
fi
|
||||
done
|
||||
result="${result} $1"
|
||||
}
|
||||
echo "Building dependencies"
|
||||
for (( i=0; i<number; i++ )); do
|
||||
###echo "DBG: considering ${generated_files[${i}]}"
|
||||
if ! ${visited[${i}]}; then
|
||||
###echo "DBG: tsorting ${generated_files[${i}]}"
|
||||
tsort ${i}
|
||||
fi
|
||||
done
|
||||
index=(${result})
|
||||
|
||||
haveindex=${index:-}
|
||||
if [[ -z ${haveindex} ]]; then
|
||||
echo No files found for $0
|
||||
echo A previous run of $0 may have deleted all the files and then crashed.
|
||||
echo Use 'touch' to create files named 'types.generated.go' listed as deleted in 'git status'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Building codecgen"
|
||||
CODECGEN="${PWD}/codecgen_binary"
|
||||
go build -o "${CODECGEN}" ./vendor/github.com/ugorji/go/codec/codecgen
|
||||
|
||||
# Running codecgen fails if some of the files doesn't compile.
|
||||
# Thus (since all the files are completely auto-generated and
|
||||
# not required for the code to be compilable, we first remove
|
||||
# them and the regenerate them.
|
||||
for (( i=0; i < number; i++ )); do
|
||||
rm -f "${generated_files[${i}]}"
|
||||
done
|
||||
|
||||
# Generate files in the dependency order.
|
||||
for current in "${index[@]}"; do
|
||||
generated_file=${generated_files[${current}]}
|
||||
initial_dir=${PWD}
|
||||
file=${generated_file/\.generated\.go/.go}
|
||||
echo "processing ${file}"
|
||||
# codecgen work only if invoked from directory where the file
|
||||
# is located.
|
||||
pushd "$(dirname ${file})" > /dev/null
|
||||
base_file=$(basename "${file}")
|
||||
base_generated_file=$(basename "${generated_file}")
|
||||
# We use '-d 1234' flag to have a deterministic output every time.
|
||||
# The constant was just randomly chosen.
|
||||
###echo "DBG: running ${CODECGEN} -d 1234 -o ${base_generated_file} ${base_file}"
|
||||
${CODECGEN} -d 1234 -o "${base_generated_file}" "${base_file}"
|
||||
# Add boilerplate at the beginning of the generated file.
|
||||
sed 's/YEAR/2017/' "${initial_dir}/hack/boilerplate/boilerplate.go.txt" > "${base_generated_file}.tmp"
|
||||
cat "${base_generated_file}" >> "${base_generated_file}.tmp"
|
||||
mv "${base_generated_file}.tmp" "${base_generated_file}"
|
||||
popd > /dev/null
|
||||
done
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "${PRJ_PREFIX}/vendor/k8s.io/kubernetes/cmd/libs/go2idl/conversion-gen"
|
||||
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/conversion-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "${PRJ_PREFIX}/vendor/k8s.io/kubernetes/cmd/libs/go2idl/deepcopy-gen"
|
||||
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/deepcopy-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "${PRJ_PREFIX}/vendor/k8s.io/kubernetes/cmd/libs/go2idl/defaulter-gen"
|
||||
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/defaulter-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
|
||||
hack/update-gofmt.sh (new executable file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "${DESCHEDULER_ROOT}"
|
||||
|
||||
find_files() {
|
||||
find . -not \( \
|
||||
\( \
|
||||
-wholename './output' \
|
||||
-o -wholename './_output' \
|
||||
-o -wholename './release' \
|
||||
-o -wholename './target' \
|
||||
-o -wholename './.git' \
|
||||
-o -wholename '*/third_party/*' \
|
||||
-o -wholename '*/Godeps/*' \
|
||||
-o -wholename '*/vendor/*' \
|
||||
\) -prune \
|
||||
\) -name '*.go'
|
||||
}
|
||||
|
||||
GOFMT="gofmt -s -w"
|
||||
find_files | xargs $GOFMT -l
|
||||
25
hack/update-toc.sh
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/mdtoc --inplace README.md
|
||||
21
hack/update-vendor.sh
Executable file
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go mod tidy
|
||||
go mod vendor
|
||||
36
hack/verify-conversions.sh
Executable file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
|
||||
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
|
||||
|
||||
_deschedulertmp="${_tmpdir}"
|
||||
mkdir -p "${_deschedulertmp}"
|
||||
|
||||
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
|
||||
_deschedulertmp="${_deschedulertmp}/descheduler"
|
||||
|
||||
pushd "${_deschedulertmp}" > /dev/null 2>&1
|
||||
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/conversion-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "./pkg/apis/componentconfig/v1alpha1,./pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.conversion
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
|
||||
echo "Generated output differs:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Generated conversions verify failed. Please run ./hack/update-generated-conversions.sh"
|
||||
exit 1
|
||||
fi
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
echo "Generated conversions verified."
|
||||
36
hack/verify-deep-copies.sh
Executable file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
|
||||
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
|
||||
|
||||
_deschedulertmp="${_tmpdir}"
|
||||
mkdir -p "${_deschedulertmp}"
|
||||
|
||||
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
|
||||
_deschedulertmp="${_deschedulertmp}/descheduler"
|
||||
|
||||
pushd "${_deschedulertmp}" > /dev/null 2>&1
|
||||
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/deepcopy-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "./pkg/apis/componentconfig,./pkg/apis/componentconfig/v1alpha1,./pkg/api,./pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.deepcopy
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
|
||||
echo "Generated deep-copies output differs:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Generated deep-copies verify failed. Please run ./hack/update-generated-deep-copies.sh"
|
||||
exit 1
|
||||
fi
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
echo "Generated deep-copies verified."
|
||||
35
hack/verify-defaulters.sh
Executable file
@@ -0,0 +1,35 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
|
||||
|
||||
_deschedulertmp="${_tmpdir}"
|
||||
mkdir -p "${_deschedulertmp}"
|
||||
|
||||
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
|
||||
_deschedulertmp="${_deschedulertmp}/descheduler"
|
||||
|
||||
pushd "${_deschedulertmp}" > /dev/null 2>&1
|
||||
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/defaulter-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.defaults
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
|
||||
echo "Generated defaulters output differs:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Generated defaulters verify failed. Please run ./hack/update-generated-defaulters.sh"
|
||||
fi
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
echo "Generated Defaulters verified."
|
||||
@@ -23,8 +23,8 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION}', skipping gofmt."
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -45,7 +45,7 @@ find_files() {
|
||||
\) -name '*.go'
|
||||
}
|
||||
|
||||
GOFMT="gofmt -s"
|
||||
GOFMT="gofmt -s"
|
||||
bad_files=$(find_files | xargs $GOFMT -l)
|
||||
if [[ -n "${bad_files}" ]]; then
|
||||
echo "!!! '$GOFMT' needs to be run on the following files: "
|
||||
|
||||
19
hack/verify-govet.sh
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go vet ${OS_ROOT}/...
|
||||
41
hack/verify-spelling.sh
Executable file
@@ -0,0 +1,41 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script checks for commonly misspelled English words in all files in the
# working directory using the client9/misspell package.
|
||||
# Usage: `hack/verify-spelling.sh`.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
|
||||
export KUBE_ROOT
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
# Ensure that we find the binaries we build before anything else.
|
||||
export GOBIN="${OS_OUTPUT_BINPATH}"
|
||||
PATH="${GOBIN}:${PATH}"
|
||||
|
||||
# Install tools we need
|
||||
pushd "${KUBE_ROOT}" >/dev/null
|
||||
GO111MODULE=on go install github.com/client9/misspell/cmd/misspell
|
||||
popd >/dev/null
|
||||
|
||||
# Spell checking
|
||||
# All the skipping files are defined in hack/.spelling_failures
|
||||
skipping_file="${KUBE_ROOT}/hack/.spelling_failures"
|
||||
failing_packages=$(sed "s| | -e |g" "${skipping_file}")
|
||||
git ls-files | grep -v -e "${failing_packages}" | xargs misspell -i "Creater,creater,ect" -error -o stderr
|
||||
29
hack/verify-toc.sh
Executable file
@@ -0,0 +1,29 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
|
||||
|
||||
if ! ${OS_OUTPUT_BINPATH}/mdtoc --inplace --dryrun README.md
|
||||
then
|
||||
echo "ERROR: Changes detected to table of contents. Run ./hack/update-toc.sh" >&2
|
||||
exit 1
|
||||
fi
|
||||
88
hack/verify-vendor.sh
Executable file
@@ -0,0 +1,88 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This is mostly copied from the hack/verify-vendor.sh script located in k8s.io/kubernetes
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
|
||||
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-vendor.XXXXXX")"
|
||||
|
||||
if [[ -z ${KEEP_TMP:-} ]]; then
|
||||
KEEP_TMP=false
|
||||
fi
|
||||
|
||||
function cleanup {
|
||||
# make go module dirs writeable
|
||||
chmod -R +w "${_tmpdir}"
|
||||
if [ "${KEEP_TMP}" == "true" ]; then
|
||||
echo "Leaving ${_tmpdir} for you to examine or copy. Please delete it manually when finished. (rm -rf ${_tmpdir})"
|
||||
else
|
||||
echo "Removing ${_tmpdir}"
|
||||
rm -rf "${_tmpdir}"
|
||||
fi
|
||||
}
|
||||
trap "cleanup" EXIT
|
||||
|
||||
_deschedulertmp="${_tmpdir}"
|
||||
mkdir -p "${_deschedulertmp}"
|
||||
|
||||
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
|
||||
_deschedulertmp="${_deschedulertmp}/descheduler"
|
||||
|
||||
pushd "${_deschedulertmp}" > /dev/null 2>&1
|
||||
# Destroy deps in the copy of the kube tree
|
||||
rm -rf ./vendor
|
||||
|
||||
# Recreate the vendor tree using the nice clean set we just downloaded
|
||||
hack/update-vendor.sh
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
ret=0
|
||||
|
||||
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
# Test for diffs
|
||||
if ! _out="$(diff -Naupr --ignore-matching-lines='^\s*\"GoVersion\":' go.mod "${_deschedulertmp}/go.mod")"; then
|
||||
echo "Your go.mod file is different:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Vendor Verify failed." >&2
|
||||
echo "If you're seeing this locally, run the below command to fix your go.mod:" >&2
|
||||
echo "hack/update-vendor.sh" >&2
|
||||
ret=1
|
||||
fi
|
||||
|
||||
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
|
||||
echo "Your vendored results are different:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Vendor Verify failed." >&2
|
||||
echo "${_out}" > vendordiff.patch
|
||||
echo "If you're seeing this locally, run the below command to fix your directories:" >&2
|
||||
echo "hack/update-vendor.sh" >&2
|
||||
ret=1
|
||||
fi
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
if [[ ${ret} -gt 0 ]]; then
|
||||
exit ${ret}
|
||||
fi
|
||||
|
||||
echo "Vendor Verified."
|
||||
28
kubernetes/base/configmap.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: descheduler-policy-configmap
|
||||
namespace: kube-system
|
||||
data:
|
||||
policy.yaml: |
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"RemoveDuplicates":
|
||||
enabled: true
|
||||
"RemovePodsViolatingInterPodAntiAffinity":
|
||||
enabled: true
|
||||
"LowNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"cpu" : 20
|
||||
"memory": 20
|
||||
"pods": 20
|
||||
targetThresholds:
|
||||
"cpu" : 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
|
||||
6
kubernetes/base/kustomization.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- configmap.yaml
|
||||
- rbac.yaml
|
||||
44
kubernetes/base/rbac.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: descheduler-cluster-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "watch", "list", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/eviction"]
|
||||
verbs: ["create"]
|
||||
- apiGroups: ["scheduling.k8s.io"]
|
||||
resources: ["priorityclasses"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: descheduler-sa
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: descheduler-cluster-role-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: descheduler-cluster-role
|
||||
subjects:
|
||||
- name: descheduler-sa
|
||||
kind: ServiceAccount
|
||||
namespace: kube-system
|
||||
|
||||
55
kubernetes/cronjob/cronjob.yaml
Normal file
@@ -0,0 +1,55 @@
|
||||
---
|
||||
apiVersion: batch/v1 # for k8s version < 1.21.0, use batch/v1beta1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: descheduler-cronjob
|
||||
namespace: kube-system
|
||||
spec:
|
||||
schedule: "*/2 * * * *"
|
||||
concurrencyPolicy: "Forbid"
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: descheduler-pod
|
||||
spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: k8s.gcr.io/descheduler/descheduler:v0.22.0
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
command:
|
||||
- "/bin/descheduler"
|
||||
args:
|
||||
- "--policy-config-file"
|
||||
- "/policy-dir/policy.yaml"
|
||||
- "--v"
|
||||
- "3"
|
||||
resources:
|
||||
requests:
|
||||
cpu: "500m"
|
||||
memory: "256Mi"
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10258
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: 10
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
restartPolicy: "Never"
|
||||
serviceAccountName: descheduler-sa
|
||||
volumes:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: descheduler-policy-configmap
|
||||
6
kubernetes/cronjob/kustomization.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- ../base
|
||||
- cronjob.yaml
|
||||
62
kubernetes/deployment/deployment.yaml
Normal file
@@ -0,0 +1,62 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: descheduler
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app: descheduler
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: descheduler
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: descheduler
|
||||
spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
serviceAccountName: descheduler-sa
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: k8s.gcr.io/descheduler/descheduler:v0.22.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- "/bin/descheduler"
|
||||
args:
|
||||
- "--policy-config-file"
|
||||
- "/policy-dir/policy.yaml"
|
||||
- "--descheduling-interval"
|
||||
- "5m"
|
||||
- "--v"
|
||||
- "3"
|
||||
ports:
|
||||
- containerPort: 10258
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10258
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: 10
|
||||
resources:
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 256Mi
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
volumes:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: descheduler-policy-configmap
|
||||
6
kubernetes/deployment/kustomization.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- ../base
|
||||
- deployment.yaml
|
||||
53
kubernetes/job/job.yaml
Normal file
@@ -0,0 +1,53 @@
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: descheduler-job
|
||||
namespace: kube-system
|
||||
spec:
|
||||
parallelism: 1
|
||||
completions: 1
|
||||
template:
|
||||
metadata:
|
||||
name: descheduler-pod
|
||||
spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: k8s.gcr.io/descheduler/descheduler:v0.22.0
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
command:
|
||||
- "/bin/descheduler"
|
||||
args:
|
||||
- "--policy-config-file"
|
||||
- "/policy-dir/policy.yaml"
|
||||
- "--v"
|
||||
- "3"
|
||||
resources:
|
||||
requests:
|
||||
cpu: "500m"
|
||||
memory: "256Mi"
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10258
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: 10
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
restartPolicy: "Never"
|
||||
serviceAccountName: descheduler-sa
|
||||
volumes:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: descheduler-policy-configmap
|
||||
6
kubernetes/job/kustomization.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- ../base
|
||||
- job.yaml
|
||||
72
metrics/metrics.go
Normal file
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"k8s.io/component-base/metrics"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
"sigs.k8s.io/descheduler/pkg/version"
|
||||
)
|
||||
|
||||
const (
|
||||
// DeschedulerSubsystem - subsystem name used by descheduler
|
||||
DeschedulerSubsystem = "descheduler"
|
||||
)
|
||||
|
||||
var (
|
||||
PodsEvicted = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: DeschedulerSubsystem,
|
||||
Name: "pods_evicted",
|
||||
Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
}, []string{"result", "strategy", "namespace", "node"})
|
||||
|
||||
buildInfo = metrics.NewGauge(
|
||||
&metrics.GaugeOpts{
|
||||
Subsystem: DeschedulerSubsystem,
|
||||
Name: "build_info",
|
||||
Help: "Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch",
|
||||
ConstLabels: map[string]string{"GoVersion": version.Get().GoVersion, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1},
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
)
|
||||
|
||||
metricsList = []metrics.Registerable{
|
||||
PodsEvicted,
|
||||
buildInfo,
|
||||
}
|
||||
)
|
||||
|
||||
var registerMetrics sync.Once
|
||||
|
||||
// Register all metrics.
|
||||
func Register() {
|
||||
// Register the metrics.
|
||||
registerMetrics.Do(func() {
|
||||
RegisterMetrics(metricsList...)
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterMetrics registers a list of metrics.
|
||||
func RegisterMetrics(extraMetrics ...metrics.Registerable) {
|
||||
for _, metric := range extraMetrics {
|
||||
legacyregistry.MustRegister(metric)
|
||||
}
|
||||
}
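A minimal usage sketch of the registration helpers above; the main() wrapper, the eviction call site, and the label values are assumptions added for illustration, not part of this change:

package main

import (
	"sigs.k8s.io/descheduler/metrics"
)

func main() {
	// Register() can safely be called from several places; the sync.Once
	// guard ensures metrics are registered with the legacy registry once.
	metrics.Register()

	// Hypothetical eviction call site: record one successful eviction for a
	// given strategy/namespace/node, using the labels declared on PodsEvicted.
	metrics.PodsEvicted.WithLabelValues("success", "LowNodeUtilization", "default", "node-1").Inc()
}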
|
||||
@@ -16,4 +16,4 @@ limitations under the License.
|
||||
|
||||
// +k8s:deepcopy-gen=package,register
|
||||
|
||||
package api // import "github.com/kubernetes-incubator/descheduler/pkg/api"
|
||||
package api // import "sigs.k8s.io/descheduler/pkg/api"
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package install installs the descheduler's policy API group.
|
||||
package install
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/apimachinery/announced"
|
||||
"k8s.io/apimachinery/pkg/apimachinery/registered"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
deschedulerapi "github.com/kubernetes-incubator/descheduler/pkg/api"
|
||||
"github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
|
||||
deschedulerscheme "github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Install(deschedulerscheme.GroupFactoryRegistry, deschedulerscheme.Registry, deschedulerscheme.Scheme)
|
||||
}
|
||||
|
||||
// Install registers the API group and adds types to a scheme
|
||||
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
|
||||
if err := announced.NewGroupMetaFactory(
|
||||
&announced.GroupMetaFactoryArgs{
|
||||
GroupName: deschedulerapi.GroupName,
|
||||
VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version},
|
||||
ImportPrefix: "github.com/kubernetes-incubator/descheduler/pkg/api",
|
||||
AddInternalObjectsToScheme: deschedulerapi.AddToScheme,
|
||||
},
|
||||
announced.VersionToSchemeFunc{
|
||||
v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme,
|
||||
},
|
||||
).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
var (
|
||||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
AddToScheme = SchemeBuilder.AddToScheme
|
||||
Scheme = runtime.NewScheme()
|
||||
)
|
||||
|
||||
// GroupName is the group name used in this package
|
||||
|
||||
File diff suppressed because it is too large
@@ -17,15 +17,38 @@ limitations under the License.
|
||||
package api
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type DeschedulerPolicy struct {
|
||||
metav1.TypeMeta
|
||||
|
||||
// Strategies
|
||||
Strategies StrategyList
|
||||
|
||||
// NodeSelector for a set of nodes to operate over
|
||||
NodeSelector *string
|
||||
|
||||
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
|
||||
EvictFailedBarePods *bool
|
||||
|
||||
// EvictLocalStoragePods allows pods using local storage to be evicted.
|
||||
EvictLocalStoragePods *bool
|
||||
|
||||
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
|
||||
EvictSystemCriticalPods *bool
|
||||
|
||||
// IgnorePVCPods prevents pods with PVCs from being evicted.
|
||||
IgnorePVCPods *bool
|
||||
|
||||
// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
|
||||
MaxNoOfPodsToEvictPerNode *uint
|
||||
|
||||
// MaxNoOfPodsToEvictPerNamespace restricts the maximum number of pods to be evicted per namespace.
|
||||
MaxNoOfPodsToEvictPerNamespace *uint
|
||||
}
|
||||
|
||||
type StrategyName string
|
||||
@@ -39,12 +62,32 @@ type DeschedulerStrategy struct {
|
||||
Weight int
|
||||
|
||||
// Strategy parameters
|
||||
Params StrategyParameters
|
||||
Params *StrategyParameters
|
||||
}
|
||||
|
||||
// Only one of its members may be specified
|
||||
// Namespaces carries a list of included/excluded namespaces
|
||||
// for which a given strategy is applicable
|
||||
type Namespaces struct {
|
||||
Include []string
|
||||
Exclude []string
|
||||
}
|
||||
|
||||
// Besides Namespaces only one of its members may be specified
|
||||
// TODO(jchaloup): move Namespaces ThresholdPriority and ThresholdPriorityClassName to individual strategies
|
||||
// once the policy version is bumped to v1alpha2
|
||||
type StrategyParameters struct {
|
||||
NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds
|
||||
NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds
|
||||
NodeAffinityType []string
|
||||
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts
|
||||
PodLifeTime *PodLifeTime
|
||||
RemoveDuplicates *RemoveDuplicates
|
||||
FailedPods *FailedPods
|
||||
IncludeSoftConstraints bool
|
||||
Namespaces *Namespaces
|
||||
ThresholdPriority *int32
|
||||
ThresholdPriorityClassName string
|
||||
LabelSelector *metav1.LabelSelector
|
||||
NodeFit bool
|
||||
}
|
||||
|
||||
type Percentage float64
|
||||
@@ -55,3 +98,24 @@ type NodeResourceUtilizationThresholds struct {
|
||||
TargetThresholds ResourceThresholds
|
||||
NumberOfNodes int
|
||||
}
|
||||
|
||||
type PodsHavingTooManyRestarts struct {
|
||||
PodRestartThreshold int32
|
||||
IncludingInitContainers bool
|
||||
}
|
||||
|
||||
type RemoveDuplicates struct {
|
||||
ExcludeOwnerKinds []string
|
||||
}
|
||||
|
||||
type PodLifeTime struct {
|
||||
MaxPodLifeTimeSeconds *uint
|
||||
PodStatusPhases []string
|
||||
}
|
||||
|
||||
type FailedPods struct {
|
||||
ExcludeOwnerKinds []string
|
||||
MinPodLifetimeSeconds *uint
|
||||
Reasons []string
|
||||
IncludingInitContainers bool
|
||||
}
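For context, a short sketch of how these internal types might be populated in Go code. It assumes StrategyList and ResourceThresholds are the map types used elsewhere in the package, and is illustrative rather than code from the repository:

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
)

func main() {
	// With Params now a pointer, strategies without parameters simply leave
	// it nil; only LowNodeUtilization carries a StrategyParameters value here.
	policy := api.DeschedulerPolicy{
		Strategies: api.StrategyList{
			"RemoveDuplicates": api.DeschedulerStrategy{Enabled: true},
			"LowNodeUtilization": api.DeschedulerStrategy{
				Enabled: true,
				Params: &api.StrategyParameters{
					NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
						Thresholds:       api.ResourceThresholds{"cpu": 20, "memory": 20, "pods": 20},
						TargetThresholds: api.ResourceThresholds{"cpu": 50, "memory": 50, "pods": 50},
					},
				},
			},
		},
	}
	fmt.Printf("configured %d strategies\n", len(policy.Strategies))
}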
|
||||
|
||||
@@ -15,10 +15,10 @@ limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package,register
|
||||
// +k8s:conversion-gen=github.com/kubernetes-incubator/descheduler/pkg/api
|
||||
// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/api
|
||||
// +k8s:defaulter-gen=TypeMeta
|
||||
|
||||
// Package v1alpha1 is the v1alpha1 version of the descheduler API
|
||||
// +groupName=descheduler
|
||||
|
||||
package v1alpha1 // import "github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
|
||||
package v1alpha1 // import "sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||
|
||||
File diff suppressed because it is too large
@@ -17,15 +17,38 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type DeschedulerPolicy struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Strategies
|
||||
Strategies StrategyList `json:"strategies,omitempty"`
|
||||
|
||||
// NodeSelector for a set of nodes to operate over
|
||||
NodeSelector *string `json:"nodeSelector,omitempty"`
|
||||
|
||||
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
|
||||
EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`
|
||||
|
||||
// EvictLocalStoragePods allows pods using local storage to be evicted.
|
||||
EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
|
||||
|
||||
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
|
||||
EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`
|
||||
|
||||
// IgnorePVCPods prevents pods with PVCs from being evicted.
|
||||
IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
|
||||
|
||||
// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
|
||||
MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
|
||||
|
||||
// MaxNoOfPodsToEvictPerNamespace restricts the maximum number of pods to be evicted per namespace.
|
||||
MaxNoOfPodsToEvictPerNamespace *int `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
|
||||
}
|
||||
|
||||
type StrategyName string
|
||||
@@ -39,12 +62,30 @@ type DeschedulerStrategy struct {
|
||||
Weight int `json:"weight,omitempty"`
|
||||
|
||||
// Strategy parameters
|
||||
Params StrategyParameters `json:"params,omitempty"`
|
||||
Params *StrategyParameters `json:"params,omitempty"`
|
||||
}
|
||||
|
||||
// Only one of its members may be specified
|
||||
// Namespaces carries a list of included/excluded namespaces
|
||||
// for which a given strategy is applicable.
|
||||
type Namespaces struct {
|
||||
Include []string `json:"include"`
|
||||
Exclude []string `json:"exclude"`
|
||||
}
|
||||
|
||||
// Besides Namespaces ThresholdPriority and ThresholdPriorityClassName only one of its members may be specified
|
||||
type StrategyParameters struct {
|
||||
NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
|
||||
NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
|
||||
NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
|
||||
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
|
||||
PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
|
||||
RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
|
||||
FailedPods *FailedPods `json:"failedPods,omitempty"`
|
||||
IncludeSoftConstraints bool `json:"includeSoftConstraints"`
|
||||
Namespaces *Namespaces `json:"namespaces"`
|
||||
ThresholdPriority *int32 `json:"thresholdPriority"`
|
||||
ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
|
||||
NodeFit bool `json:"nodeFit"`
|
||||
}
|
||||
|
||||
type Percentage float64
|
||||
@@ -55,3 +96,24 @@ type NodeResourceUtilizationThresholds struct {
|
||||
TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
}
|
||||
|
||||
type PodsHavingTooManyRestarts struct {
|
||||
PodRestartThreshold int32 `json:"podRestartThreshold,omitempty"`
|
||||
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
|
||||
}
|
||||
|
||||
type RemoveDuplicates struct {
|
||||
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
|
||||
}
|
||||
|
||||
type PodLifeTime struct {
|
||||
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
|
||||
PodStatusPhases []string `json:"podStatusPhases,omitempty"`
|
||||
}
|
||||
|
||||
type FailedPods struct {
|
||||
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
|
||||
MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds,omitempty"`
|
||||
Reasons []string `json:"reasons,omitempty"`
|
||||
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
|
||||
}
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -16,38 +17,140 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This file was autogenerated by conversion-gen. Do not edit it manually!
|
||||
// Code generated by conversion-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
api "github.com/kubernetes-incubator/descheduler/pkg/api"
|
||||
unsafe "unsafe"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
unsafe "unsafe"
|
||||
api "sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(RegisterConversions)
|
||||
localSchemeBuilder.Register(RegisterConversions)
|
||||
}
|
||||
|
||||
// RegisterConversions adds conversion functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
func RegisterConversions(scheme *runtime.Scheme) error {
|
||||
return scheme.AddGeneratedConversionFuncs(
|
||||
Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy,
|
||||
Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy,
|
||||
Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy,
|
||||
Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy,
|
||||
Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds,
|
||||
Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds,
|
||||
Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters,
|
||||
Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters,
|
||||
)
|
||||
func RegisterConversions(s *runtime.Scheme) error {
|
||||
if err := s.AddGeneratedConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*DeschedulerStrategy)(nil), (*api.DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(a.(*DeschedulerStrategy), b.(*api.DeschedulerStrategy), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.DeschedulerStrategy)(nil), (*DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(a.(*api.DeschedulerStrategy), b.(*DeschedulerStrategy), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*FailedPods)(nil), (*api.FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_FailedPods_To_api_FailedPods(a.(*FailedPods), b.(*api.FailedPods), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.FailedPods)(nil), (*FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_FailedPods_To_v1alpha1_FailedPods(a.(*api.FailedPods), b.(*FailedPods), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.Namespaces)(nil), (*Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_Namespaces_To_v1alpha1_Namespaces(a.(*api.Namespaces), b.(*Namespaces), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*NodeResourceUtilizationThresholds)(nil), (*api.NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(a.(*NodeResourceUtilizationThresholds), b.(*api.NodeResourceUtilizationThresholds), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.NodeResourceUtilizationThresholds)(nil), (*NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(a.(*api.NodeResourceUtilizationThresholds), b.(*NodeResourceUtilizationThresholds), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*PodLifeTime)(nil), (*api.PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(a.(*PodLifeTime), b.(*api.PodLifeTime), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.PodLifeTime)(nil), (*PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(a.(*api.PodLifeTime), b.(*PodLifeTime), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*PodsHavingTooManyRestarts)(nil), (*api.PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(a.(*PodsHavingTooManyRestarts), b.(*api.PodsHavingTooManyRestarts), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.PodsHavingTooManyRestarts)(nil), (*PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(a.(*api.PodsHavingTooManyRestarts), b.(*PodsHavingTooManyRestarts), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*RemoveDuplicates)(nil), (*api.RemoveDuplicates)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(a.(*RemoveDuplicates), b.(*api.RemoveDuplicates), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.RemoveDuplicates)(nil), (*RemoveDuplicates)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(a.(*api.RemoveDuplicates), b.(*RemoveDuplicates), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*StrategyParameters)(nil), (*api.StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(a.(*StrategyParameters), b.(*api.StrategyParameters), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.StrategyParameters)(nil), (*StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(a.(*api.StrategyParameters), b.(*StrategyParameters), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
|
||||
out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
|
||||
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
|
||||
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
|
||||
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
|
||||
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
|
||||
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
|
||||
if in.MaxNoOfPodsToEvictPerNode != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
|
||||
*out = new(uint)
|
||||
**out = uint(**in)
|
||||
} else {
|
||||
out.MaxNoOfPodsToEvictPerNode = nil
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNamespace != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
|
||||
*out = new(uint)
|
||||
**out = uint(**in)
|
||||
} else {
|
||||
out.MaxNoOfPodsToEvictPerNamespace = nil
|
||||
}
|
||||
return nil
|
||||
}
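The block just above copies MaxNoOfPodsToEvictPerNode element by element because the v1alpha1 field is *int while the internal field is *uint; the one-line unsafe.Pointer casts are only valid when both sides have an identical type. A small standalone Go illustration of that distinction (an assumption-laden sketch, not code from the repository):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Fields whose types match exactly on both sides (for example *bool to
	// *bool) can be aliased directly via unsafe.Pointer, which is what the
	// generated one-liners above do.
	flag := true
	aliased := (*bool)(unsafe.Pointer(&flag))
	fmt.Println(*aliased)

	// *int and *uint are distinct types, so the generator instead emits an
	// explicit allocate-and-convert block, mirroring the code above.
	external := 5
	converted := uint(external)
	internal := &converted
	fmt.Println(*internal)
}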
|
||||
|
||||
@@ -58,6 +161,25 @@ func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Descheduler
|
||||
|
||||
func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
|
||||
out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
|
||||
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
|
||||
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
|
||||
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
|
||||
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
|
||||
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
|
||||
if in.MaxNoOfPodsToEvictPerNode != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
|
||||
*out = new(int)
|
||||
**out = int(**in)
|
||||
} else {
|
||||
out.MaxNoOfPodsToEvictPerNode = nil
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNamespace != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
|
||||
*out = new(int)
|
||||
**out = int(**in)
|
||||
} else {
|
||||
out.MaxNoOfPodsToEvictPerNamespace = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -69,9 +191,7 @@ func Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.Desched
|
||||
func autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
|
||||
out.Enabled = in.Enabled
|
||||
out.Weight = in.Weight
|
||||
if err := Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(&in.Params, &out.Params, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Params = (*api.StrategyParameters)(unsafe.Pointer(in.Params))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -83,9 +203,7 @@ func Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *Desched
|
||||
func autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
|
||||
out.Enabled = in.Enabled
|
||||
out.Weight = in.Weight
|
||||
if err := Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(&in.Params, &out.Params, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Params = (*StrategyParameters)(unsafe.Pointer(in.Params))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -94,6 +212,54 @@ func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.Des
|
||||
return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
|
||||
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
|
||||
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
|
||||
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
|
||||
out.IncludingInitContainers = in.IncludingInitContainers
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_FailedPods_To_api_FailedPods is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
|
||||
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
|
||||
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
|
||||
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
|
||||
out.IncludingInitContainers = in.IncludingInitContainers
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_FailedPods_To_v1alpha1_FailedPods is an autogenerated conversion function.
|
||||
func Convert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
|
||||
return autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
|
||||
out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
|
||||
out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_Namespaces_To_api_Namespaces is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
|
||||
out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
|
||||
out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_Namespaces_To_v1alpha1_Namespaces is an autogenerated conversion function.
|
||||
func Convert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
|
||||
return autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
|
||||
out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
|
||||
out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
|
||||
@@ -118,10 +284,83 @@ func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtili
	return autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in, out, s)
}

func autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
	out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
	return nil
}

// Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime is an autogenerated conversion function.
func Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
	return autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in, out, s)
}

func autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
	out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
	return nil
}

// Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime is an autogenerated conversion function.
func Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
	return autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in, out, s)
}

func autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
	out.PodRestartThreshold = in.PodRestartThreshold
	out.IncludingInitContainers = in.IncludingInitContainers
	return nil
}

// Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts is an autogenerated conversion function.
func Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
	return autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in, out, s)
}

func autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
	out.PodRestartThreshold = in.PodRestartThreshold
	out.IncludingInitContainers = in.IncludingInitContainers
	return nil
}

// Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts is an autogenerated conversion function.
func Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
	return autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in, out, s)
}

func autoConvert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in *RemoveDuplicates, out *api.RemoveDuplicates, s conversion.Scope) error {
	out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
	return nil
}

// Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates is an autogenerated conversion function.
func Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in *RemoveDuplicates, out *api.RemoveDuplicates, s conversion.Scope) error {
	return autoConvert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in, out, s)
}

func autoConvert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in *api.RemoveDuplicates, out *RemoveDuplicates, s conversion.Scope) error {
	out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
	return nil
}

// Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates is an autogenerated conversion function.
func Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in *api.RemoveDuplicates, out *RemoveDuplicates, s conversion.Scope) error {
	return autoConvert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in, out, s)
}

func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
	if err := Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
		return err
	}
	out.NodeResourceUtilizationThresholds = (*api.NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
	out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
	out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
	out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
	out.FailedPods = (*api.FailedPods)(unsafe.Pointer(in.FailedPods))
	out.IncludeSoftConstraints = in.IncludeSoftConstraints
	out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
	out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
	out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
	out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
	out.NodeFit = in.NodeFit
	return nil
}

@@ -131,9 +370,18 @@ func Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyP
}

func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
	if err := Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
		return err
	}
	out.NodeResourceUtilizationThresholds = (*NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
	out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
	out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
	out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
	out.FailedPods = (*FailedPods)(unsafe.Pointer(in.FailedPods))
	out.IncludeSoftConstraints = in.IncludeSoftConstraints
	out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
	out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
	out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
	out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
	out.NodeFit = in.NodeFit
	return nil
}

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2017 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,98 +17,354 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.

package v1alpha1

import (
	conversion "k8s.io/apimachinery/pkg/conversion"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	reflect "reflect"
)

func init() {
	SchemeBuilder.Register(RegisterDeepCopies)
}

// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
// to allow building arbitrary schemes.
func RegisterDeepCopies(scheme *runtime.Scheme) error {
	return scheme.AddGeneratedDeepCopyFuncs(
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_DeschedulerPolicy, InType: reflect.TypeOf(&DeschedulerPolicy{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_DeschedulerStrategy, InType: reflect.TypeOf(&DeschedulerStrategy{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_NodeResourceUtilizationThresholds, InType: reflect.TypeOf(&NodeResourceUtilizationThresholds{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_StrategyParameters, InType: reflect.TypeOf(&StrategyParameters{})},
	)
}

// DeepCopy_v1alpha1_DeschedulerPolicy is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_DeschedulerPolicy(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*DeschedulerPolicy)
|
||||
out := out.(*DeschedulerPolicy)
|
||||
*out = *in
|
||||
if in.Strategies != nil {
|
||||
in, out := &in.Strategies, &out.Strategies
|
||||
*out = make(StrategyList)
|
||||
for key, val := range *in {
|
||||
newVal := new(DeschedulerStrategy)
|
||||
if err := DeepCopy_v1alpha1_DeschedulerStrategy(&val, newVal, c); err != nil {
|
||||
return err
|
||||
}
|
||||
(*out)[key] = *newVal
|
||||
}
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Strategies != nil {
|
||||
in, out := &in.Strategies, &out.Strategies
|
||||
*out = make(StrategyList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.NodeSelector != nil {
|
||||
in, out := &in.NodeSelector, &out.NodeSelector
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictFailedBarePods != nil {
|
||||
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictLocalStoragePods != nil {
|
||||
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictSystemCriticalPods != nil {
|
||||
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.IgnorePVCPods != nil {
|
||||
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNode != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
|
||||
*out = new(int)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNamespace != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
|
||||
*out = new(int)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
|
||||
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
|
||||
*out = *in
|
||||
if in.Params != nil {
|
||||
in, out := &in.Params, &out.Params
|
||||
*out = new(StrategyParameters)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
|
||||
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerStrategy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
|
||||
*out = *in
|
||||
if in.ExcludeOwnerKinds != nil {
|
||||
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.MinPodLifetimeSeconds != nil {
|
||||
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.Reasons != nil {
|
||||
in, out := &in.Reasons, &out.Reasons
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
|
||||
func (in *FailedPods) DeepCopy() *FailedPods {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FailedPods)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
|
||||
*out = *in
|
||||
if in.Include != nil {
|
||||
in, out := &in.Include, &out.Include
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Exclude != nil {
|
||||
in, out := &in.Exclude, &out.Exclude
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
|
||||
func (in *Namespaces) DeepCopy() *Namespaces {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Namespaces)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
|
||||
*out = *in
|
||||
if in.Thresholds != nil {
|
||||
in, out := &in.Thresholds, &out.Thresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.TargetThresholds != nil {
|
||||
in, out := &in.TargetThresholds, &out.TargetThresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeResourceUtilizationThresholds)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
|
||||
*out = *in
|
||||
if in.MaxPodLifeTimeSeconds != nil {
|
||||
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.PodStatusPhases != nil {
|
||||
in, out := &in.PodStatusPhases, &out.PodStatusPhases
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
|
||||
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodLifeTime)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
|
||||
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodsHavingTooManyRestarts)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
|
||||
*out = *in
|
||||
if in.ExcludeOwnerKinds != nil {
|
||||
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
|
||||
func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RemoveDuplicates)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_DeschedulerStrategy is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_DeschedulerStrategy(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*DeschedulerStrategy)
|
||||
out := out.(*DeschedulerStrategy)
|
||||
*out = *in
|
||||
if err := DeepCopy_v1alpha1_StrategyParameters(&in.Params, &out.Params, c); err != nil {
|
||||
return err
|
||||
}
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
|
||||
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ResourceThresholds)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in StrategyList) DeepCopyInto(out *StrategyList) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(StrategyList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_NodeResourceUtilizationThresholds is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_NodeResourceUtilizationThresholds(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*NodeResourceUtilizationThresholds)
|
||||
out := out.(*NodeResourceUtilizationThresholds)
|
||||
*out = *in
|
||||
if in.Thresholds != nil {
|
||||
in, out := &in.Thresholds, &out.Thresholds
|
||||
*out = make(ResourceThresholds)
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.TargetThresholds != nil {
|
||||
in, out := &in.TargetThresholds, &out.TargetThresholds
|
||||
*out = make(ResourceThresholds)
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
|
||||
func (in StrategyList) DeepCopy() StrategyList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StrategyList)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_StrategyParameters is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_StrategyParameters(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*StrategyParameters)
|
||||
out := out.(*StrategyParameters)
|
||||
*out = *in
|
||||
if err := DeepCopy_v1alpha1_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, c); err != nil {
|
||||
return err
|
||||
}
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = *in
|
||||
if in.NodeResourceUtilizationThresholds != nil {
|
||||
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
|
||||
*out = new(NodeResourceUtilizationThresholds)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeAffinityType != nil {
|
||||
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.PodsHavingTooManyRestarts != nil {
|
||||
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
|
||||
*out = new(PodsHavingTooManyRestarts)
|
||||
**out = **in
|
||||
}
|
||||
if in.PodLifeTime != nil {
|
||||
in, out := &in.PodLifeTime, &out.PodLifeTime
|
||||
*out = new(PodLifeTime)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.RemoveDuplicates != nil {
|
||||
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
|
||||
*out = new(RemoveDuplicates)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.FailedPods != nil {
|
||||
in, out := &in.FailedPods, &out.FailedPods
|
||||
*out = new(FailedPods)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Namespaces != nil {
|
||||
in, out := &in.Namespaces, &out.Namespaces
|
||||
*out = new(Namespaces)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ThresholdPriority != nil {
|
||||
in, out := &in.ThresholdPriority, &out.ThresholdPriority
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.LabelSelector != nil {
|
||||
in, out := &in.LabelSelector, &out.LabelSelector
|
||||
*out = new(v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
|
||||
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StrategyParameters)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2017 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +17,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was autogenerated by defaulter-gen. Do not edit it manually!
// Code generated by defaulter-gen. DO NOT EDIT.

package v1alpha1

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2017 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,98 +17,354 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.

package api

import (
	conversion "k8s.io/apimachinery/pkg/conversion"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	reflect "reflect"
)

func init() {
	SchemeBuilder.Register(RegisterDeepCopies)
}

// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
// to allow building arbitrary schemes.
func RegisterDeepCopies(scheme *runtime.Scheme) error {
	return scheme.AddGeneratedDeepCopyFuncs(
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeschedulerPolicy, InType: reflect.TypeOf(&DeschedulerPolicy{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeschedulerStrategy, InType: reflect.TypeOf(&DeschedulerStrategy{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeResourceUtilizationThresholds, InType: reflect.TypeOf(&NodeResourceUtilizationThresholds{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_StrategyParameters, InType: reflect.TypeOf(&StrategyParameters{})},
	)
}

// DeepCopy_api_DeschedulerPolicy is an autogenerated deepcopy function.
|
||||
func DeepCopy_api_DeschedulerPolicy(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*DeschedulerPolicy)
|
||||
out := out.(*DeschedulerPolicy)
|
||||
*out = *in
|
||||
if in.Strategies != nil {
|
||||
in, out := &in.Strategies, &out.Strategies
|
||||
*out = make(StrategyList)
|
||||
for key, val := range *in {
|
||||
newVal := new(DeschedulerStrategy)
|
||||
if err := DeepCopy_api_DeschedulerStrategy(&val, newVal, c); err != nil {
|
||||
return err
|
||||
}
|
||||
(*out)[key] = *newVal
|
||||
}
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Strategies != nil {
|
||||
in, out := &in.Strategies, &out.Strategies
|
||||
*out = make(StrategyList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.NodeSelector != nil {
|
||||
in, out := &in.NodeSelector, &out.NodeSelector
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictFailedBarePods != nil {
|
||||
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictLocalStoragePods != nil {
|
||||
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictSystemCriticalPods != nil {
|
||||
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.IgnorePVCPods != nil {
|
||||
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNode != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNamespace != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
|
||||
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
|
||||
*out = *in
|
||||
if in.Params != nil {
|
||||
in, out := &in.Params, &out.Params
|
||||
*out = new(StrategyParameters)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
|
||||
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerStrategy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
|
||||
*out = *in
|
||||
if in.ExcludeOwnerKinds != nil {
|
||||
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.MinPodLifetimeSeconds != nil {
|
||||
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.Reasons != nil {
|
||||
in, out := &in.Reasons, &out.Reasons
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
|
||||
func (in *FailedPods) DeepCopy() *FailedPods {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FailedPods)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
|
||||
*out = *in
|
||||
if in.Include != nil {
|
||||
in, out := &in.Include, &out.Include
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Exclude != nil {
|
||||
in, out := &in.Exclude, &out.Exclude
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
|
||||
func (in *Namespaces) DeepCopy() *Namespaces {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Namespaces)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
|
||||
*out = *in
|
||||
if in.Thresholds != nil {
|
||||
in, out := &in.Thresholds, &out.Thresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.TargetThresholds != nil {
|
||||
in, out := &in.TargetThresholds, &out.TargetThresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeResourceUtilizationThresholds)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
|
||||
*out = *in
|
||||
if in.MaxPodLifeTimeSeconds != nil {
|
||||
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.PodStatusPhases != nil {
|
||||
in, out := &in.PodStatusPhases, &out.PodStatusPhases
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
|
||||
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodLifeTime)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
|
||||
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodsHavingTooManyRestarts)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
|
||||
*out = *in
|
||||
if in.ExcludeOwnerKinds != nil {
|
||||
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
|
||||
func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RemoveDuplicates)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_api_DeschedulerStrategy is an autogenerated deepcopy function.
|
||||
func DeepCopy_api_DeschedulerStrategy(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*DeschedulerStrategy)
|
||||
out := out.(*DeschedulerStrategy)
|
||||
*out = *in
|
||||
if err := DeepCopy_api_StrategyParameters(&in.Params, &out.Params, c); err != nil {
|
||||
return err
|
||||
}
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
|
||||
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ResourceThresholds)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in StrategyList) DeepCopyInto(out *StrategyList) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(StrategyList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_api_NodeResourceUtilizationThresholds is an autogenerated deepcopy function.
|
||||
func DeepCopy_api_NodeResourceUtilizationThresholds(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*NodeResourceUtilizationThresholds)
|
||||
out := out.(*NodeResourceUtilizationThresholds)
|
||||
*out = *in
|
||||
if in.Thresholds != nil {
|
||||
in, out := &in.Thresholds, &out.Thresholds
|
||||
*out = make(ResourceThresholds)
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.TargetThresholds != nil {
|
||||
in, out := &in.TargetThresholds, &out.TargetThresholds
|
||||
*out = make(ResourceThresholds)
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
|
||||
func (in StrategyList) DeepCopy() StrategyList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StrategyList)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopy_api_StrategyParameters is an autogenerated deepcopy function.
|
||||
func DeepCopy_api_StrategyParameters(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*StrategyParameters)
|
||||
out := out.(*StrategyParameters)
|
||||
*out = *in
|
||||
if err := DeepCopy_api_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, c); err != nil {
|
||||
return err
|
||||
}
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = *in
|
||||
if in.NodeResourceUtilizationThresholds != nil {
|
||||
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
|
||||
*out = new(NodeResourceUtilizationThresholds)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeAffinityType != nil {
|
||||
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.PodsHavingTooManyRestarts != nil {
|
||||
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
|
||||
*out = new(PodsHavingTooManyRestarts)
|
||||
**out = **in
|
||||
}
|
||||
if in.PodLifeTime != nil {
|
||||
in, out := &in.PodLifeTime, &out.PodLifeTime
|
||||
*out = new(PodLifeTime)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.RemoveDuplicates != nil {
|
||||
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
|
||||
*out = new(RemoveDuplicates)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.FailedPods != nil {
|
||||
in, out := &in.FailedPods, &out.FailedPods
|
||||
*out = new(FailedPods)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Namespaces != nil {
|
||||
in, out := &in.Namespaces, &out.Namespaces
|
||||
*out = new(Namespaces)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ThresholdPriority != nil {
|
||||
in, out := &in.ThresholdPriority, &out.ThresholdPriority
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.LabelSelector != nil {
|
||||
in, out := &in.LabelSelector, &out.LabelSelector
|
||||
*out = new(v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
|
||||
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StrategyParameters)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
@@ -16,4 +16,4 @@ limitations under the License.
// +k8s:deepcopy-gen=package,register

package componentconfig // import "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
package componentconfig // import "sigs.k8s.io/descheduler/pkg/apis/componentconfig"

@@ -1,49 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package install installs the descheduler's componentconfig API group.
package install

import (
	"k8s.io/apimachinery/pkg/apimachinery/announced"
	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
	"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
	deschedulerscheme "github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
)

func init() {
	Install(deschedulerscheme.GroupFactoryRegistry, deschedulerscheme.Registry, deschedulerscheme.Scheme)
}

// Install registers the API group and adds types to a scheme
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
	if err := announced.NewGroupMetaFactory(
		&announced.GroupMetaFactoryArgs{
			GroupName: componentconfig.GroupName,
			VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version},
			ImportPrefix: "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig",
			AddInternalObjectsToScheme: componentconfig.AddToScheme,
		},
		announced.VersionToSchemeFunc{
			v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme,
		},
	).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
		panic(err)
	}
}
@@ -1,578 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// ************************************************************
|
||||
// DO NOT EDIT.
|
||||
// THIS FILE IS AUTO-GENERATED BY codecgen.
|
||||
// ************************************************************
|
||||
|
||||
package componentconfig
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
codec1978 "github.com/ugorji/go/codec"
|
||||
pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"reflect"
|
||||
"runtime"
|
||||
time "time"
|
||||
)
|
||||
|
||||
const (
|
||||
// ----- content types ----
|
||||
codecSelferC_UTF81234 = 1
|
||||
codecSelferC_RAW1234 = 0
|
||||
// ----- value types used ----
|
||||
codecSelferValueTypeArray1234 = 10
|
||||
codecSelferValueTypeMap1234 = 9
|
||||
// ----- containerStateValues ----
|
||||
codecSelfer_containerMapKey1234 = 2
|
||||
codecSelfer_containerMapValue1234 = 3
|
||||
codecSelfer_containerMapEnd1234 = 4
|
||||
codecSelfer_containerArrayElem1234 = 6
|
||||
codecSelfer_containerArrayEnd1234 = 7
|
||||
)
|
||||
|
||||
var (
|
||||
codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
|
||||
codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
|
||||
)
|
||||
|
||||
type codecSelfer1234 struct{}
|
||||
|
||||
func init() {
|
||||
if codec1978.GenVersion != 5 {
|
||||
_, file, _, _ := runtime.Caller(0)
|
||||
err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
|
||||
5, codec1978.GenVersion, file)
|
||||
panic(err)
|
||||
}
|
||||
if false { // reference the types, but skip this branch at build/run time
|
||||
var v0 pkg1_v1.TypeMeta
|
||||
var v1 time.Duration
|
||||
_, _ = v0, v1
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperEncoder(e)
|
||||
_, _, _ = h, z, r
|
||||
if x == nil {
|
||||
r.EncodeNil()
|
||||
} else {
|
||||
yym1 := z.EncBinary()
|
||||
_ = yym1
|
||||
if false {
|
||||
} else if z.HasExtensions() && z.EncExt(x) {
|
||||
} else {
|
||||
yysep2 := !z.EncBinary()
|
||||
yy2arr2 := z.EncBasicHandle().StructToArray
|
||||
var yyq2 [7]bool
|
||||
_, _, _ = yysep2, yyq2, yy2arr2
|
||||
const yyr2 bool = false
|
||||
yyq2[0] = x.Kind != ""
|
||||
yyq2[1] = x.APIVersion != ""
|
||||
var yynn2 int
|
||||
if yyr2 || yy2arr2 {
|
||||
r.EncodeArrayStart(7)
|
||||
} else {
|
||||
yynn2 = 5
|
||||
for _, b := range yyq2 {
|
||||
if b {
|
||||
yynn2++
|
||||
}
|
||||
}
|
||||
r.EncodeMapStart(yynn2)
|
||||
yynn2 = 0
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if yyq2[0] {
|
||||
yym4 := z.EncBinary()
|
||||
_ = yym4
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
|
||||
}
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, "")
|
||||
}
|
||||
} else {
|
||||
if yyq2[0] {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("kind"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym5 := z.EncBinary()
|
||||
_ = yym5
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if yyq2[1] {
|
||||
yym7 := z.EncBinary()
|
||||
_ = yym7
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
|
||||
}
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, "")
|
||||
}
|
||||
} else {
|
||||
if yyq2[1] {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym8 := z.EncBinary()
|
||||
_ = yym8
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
yym10 := z.EncBinary()
|
||||
_ = yym10
|
||||
if false {
|
||||
} else if z.HasExtensions() && z.EncExt(x.DeschedulingInterval) {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.DeschedulingInterval))
|
||||
}
|
||||
} else {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("DeschedulingInterval"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym11 := z.EncBinary()
|
||||
_ = yym11
|
||||
if false {
|
||||
} else if z.HasExtensions() && z.EncExt(x.DeschedulingInterval) {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.DeschedulingInterval))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
yym13 := z.EncBinary()
|
||||
_ = yym13
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigFile))
|
||||
}
|
||||
} else {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("KubeconfigFile"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym14 := z.EncBinary()
|
||||
_ = yym14
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigFile))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
yym16 := z.EncBinary()
|
||||
_ = yym16
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile))
|
||||
}
|
||||
} else {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("PolicyConfigFile"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym17 := z.EncBinary()
|
||||
_ = yym17
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
yym19 := z.EncBinary()
|
||||
_ = yym19
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeBool(bool(x.DryRun))
|
||||
}
|
||||
} else {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("DryRun"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym20 := z.EncBinary()
|
||||
_ = yym20
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeBool(bool(x.DryRun))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
yym22 := z.EncBinary()
|
||||
_ = yym22
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.NodeSelector))
|
||||
}
|
||||
} else {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("NodeSelector"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym23 := z.EncBinary()
|
||||
_ = yym23
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.NodeSelector))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
} else {
|
||||
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
yym1 := z.DecBinary()
|
||||
_ = yym1
|
||||
if false {
|
||||
} else if z.HasExtensions() && z.DecExt(x) {
|
||||
} else {
|
||||
yyct2 := r.ContainerType()
|
||||
if yyct2 == codecSelferValueTypeMap1234 {
|
||||
yyl2 := r.ReadMapStart()
|
||||
if yyl2 == 0 {
|
||||
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
|
||||
} else {
|
||||
x.codecDecodeSelfFromMap(yyl2, d)
|
||||
}
|
||||
} else if yyct2 == codecSelferValueTypeArray1234 {
|
||||
yyl2 := r.ReadArrayStart()
|
||||
if yyl2 == 0 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
} else {
|
||||
x.codecDecodeSelfFromArray(yyl2, d)
|
||||
}
|
||||
} else {
|
||||
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yys3Slc = z.DecScratchBuffer() // default slice to decode into
|
||||
_ = yys3Slc
|
||||
var yyhl3 bool = l >= 0
|
||||
for yyj3 := 0; ; yyj3++ {
|
||||
if yyhl3 {
|
||||
if yyj3 >= l {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if r.CheckBreak() {
|
||||
break
|
||||
}
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerMapKey1234)
|
||||
yys3Slc = r.DecodeBytes(yys3Slc, true, true)
|
||||
yys3 := string(yys3Slc)
|
||||
z.DecSendContainerState(codecSelfer_containerMapValue1234)
|
||||
switch yys3 {
|
||||
case "kind":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.Kind = ""
|
||||
} else {
|
||||
yyv4 := &x.Kind
|
||||
yym5 := z.DecBinary()
|
||||
_ = yym5
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv4)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "apiVersion":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.APIVersion = ""
|
||||
} else {
|
||||
yyv6 := &x.APIVersion
|
||||
yym7 := z.DecBinary()
|
||||
_ = yym7
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv6)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "DeschedulingInterval":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DeschedulingInterval = 0
|
||||
} else {
|
||||
yyv8 := &x.DeschedulingInterval
|
||||
yym9 := z.DecBinary()
|
||||
_ = yym9
|
||||
if false {
|
||||
} else if z.HasExtensions() && z.DecExt(yyv8) {
|
||||
} else {
|
||||
*((*int64)(yyv8)) = int64(r.DecodeInt(64))
|
||||
}
|
||||
}
|
||||
case "KubeconfigFile":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.KubeconfigFile = ""
|
||||
} else {
|
||||
yyv10 := &x.KubeconfigFile
|
||||
yym11 := z.DecBinary()
|
||||
_ = yym11
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv10)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "PolicyConfigFile":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.PolicyConfigFile = ""
|
||||
} else {
|
||||
yyv12 := &x.PolicyConfigFile
|
||||
yym13 := z.DecBinary()
|
||||
_ = yym13
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv12)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "DryRun":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DryRun = false
|
||||
} else {
|
||||
yyv14 := &x.DryRun
|
||||
yym15 := z.DecBinary()
|
||||
_ = yym15
|
||||
if false {
|
||||
} else {
|
||||
*((*bool)(yyv14)) = r.DecodeBool()
|
||||
}
|
||||
}
|
||||
case "NodeSelector":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeSelector = ""
|
||||
} else {
|
||||
yyv16 := &x.NodeSelector
|
||||
yym17 := z.DecBinary()
|
||||
_ = yym17
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv16)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
default:
|
||||
z.DecStructFieldNotFound(-1, yys3)
|
||||
} // end switch yys3
|
||||
} // end for yyj3
|
||||
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yyj18 int
|
||||
var yyb18 bool
|
||||
var yyhl18 bool = l >= 0
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if r.TryDecodeAsNil() {
|
||||
x.Kind = ""
|
||||
} else {
|
||||
yyv19 := &x.Kind
|
||||
yym20 := z.DecBinary()
|
||||
_ = yym20
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv19)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if r.TryDecodeAsNil() {
|
||||
x.APIVersion = ""
|
||||
} else {
|
||||
yyv21 := &x.APIVersion
|
||||
yym22 := z.DecBinary()
|
||||
_ = yym22
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv21)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DeschedulingInterval = 0
|
||||
} else {
|
||||
yyv23 := &x.DeschedulingInterval
|
||||
yym24 := z.DecBinary()
|
||||
_ = yym24
|
||||
if false {
|
||||
} else if z.HasExtensions() && z.DecExt(yyv23) {
|
||||
} else {
|
||||
*((*int64)(yyv23)) = int64(r.DecodeInt(64))
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if r.TryDecodeAsNil() {
|
||||
x.KubeconfigFile = ""
|
||||
} else {
|
||||
yyv25 := &x.KubeconfigFile
|
||||
yym26 := z.DecBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv25)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if r.TryDecodeAsNil() {
|
||||
x.PolicyConfigFile = ""
|
||||
} else {
|
||||
yyv27 := &x.PolicyConfigFile
|
||||
yym28 := z.DecBinary()
|
||||
_ = yym28
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv27)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DryRun = false
|
||||
} else {
|
||||
yyv29 := &x.DryRun
|
||||
yym30 := z.DecBinary()
|
||||
_ = yym30
|
||||
if false {
|
||||
} else {
|
||||
*((*bool)(yyv29)) = r.DecodeBool()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeSelector = ""
|
||||
} else {
|
||||
yyv31 := &x.NodeSelector
|
||||
yym32 := z.DecBinary()
|
||||
_ = yym32
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv31)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
for {
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
break
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
z.DecStructFieldNotFound(yyj18-1, "")
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
}
|
||||
@@ -20,8 +20,11 @@ import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	componentbaseconfig "k8s.io/component-base/config"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

type DeschedulerConfiguration struct {
	metav1.TypeMeta

@@ -40,4 +43,17 @@ type DeschedulerConfiguration struct {
	// Node selectors
	NodeSelector string

	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
	MaxNoOfPodsToEvictPerNode int

	// EvictLocalStoragePods allows pods using local storage to be evicted.
	EvictLocalStoragePods bool

	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
	IgnorePVCPods bool

	// Logging specifies the options of logging.
	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
	Logging componentbaseconfig.LoggingConfiguration
}

@@ -15,10 +15,10 @@ limitations under the License.
*/

// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig
// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/apis/componentconfig
// +k8s:defaulter-gen=TypeMeta

// Package v1alpha1 is the v1alpha1 version of the descheduler's componentconfig API
// +groupName=deschedulercomponentconfig

package v1alpha1 // import "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
package v1alpha1 // import "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"

@@ -1,606 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED BY codecgen.
// ************************************************************

package v1alpha1

import (
"errors"
"fmt"
codec1978 "github.com/ugorji/go/codec"
pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"reflect"
"runtime"
time "time"
)

const (
// ----- content types ----
codecSelferC_UTF81234 = 1
codecSelferC_RAW1234 = 0
// ----- value types used ----
codecSelferValueTypeArray1234 = 10
codecSelferValueTypeMap1234 = 9
// ----- containerStateValues ----
codecSelfer_containerMapKey1234 = 2
codecSelfer_containerMapValue1234 = 3
codecSelfer_containerMapEnd1234 = 4
codecSelfer_containerArrayElem1234 = 6
codecSelfer_containerArrayEnd1234 = 7
)

var (
codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
)

type codecSelfer1234 struct{}

func init() {
if codec1978.GenVersion != 5 {
_, file, _, _ := runtime.Caller(0)
err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
5, codec1978.GenVersion, file)
panic(err)
}
if false { // reference the types, but skip this branch at build/run time
var v0 pkg1_v1.TypeMeta
var v1 time.Duration
_, _ = v0, v1
}
}

func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym1 := z.EncBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [7]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[0] = x.Kind != ""
yyq2[1] = x.APIVersion != ""
yyq2[2] = x.DeschedulingInterval != 0
yyq2[4] = x.PolicyConfigFile != ""
yyq2[5] = x.DryRun != false
yyq2[6] = x.NodeSelector != ""
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(7)
} else {
yynn2 = 1
for _, b := range yyq2 {
if b {
yynn2++
}
}
r.EncodeMapStart(yynn2)
yynn2 = 0
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[0] {
yym4 := z.EncBinary()
_ = yym4
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq2[0] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("kind"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym5 := z.EncBinary()
_ = yym5
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[1] {
yym7 := z.EncBinary()
_ = yym7
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq2[1] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym8 := z.EncBinary()
_ = yym8
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[2] {
yym10 := z.EncBinary()
_ = yym10
if false {
} else if z.HasExtensions() && z.EncExt(x.DeschedulingInterval) {
} else {
r.EncodeInt(int64(x.DeschedulingInterval))
}
} else {
r.EncodeInt(0)
}
} else {
if yyq2[2] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("deschedulingInterval"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym11 := z.EncBinary()
_ = yym11
if false {
} else if z.HasExtensions() && z.EncExt(x.DeschedulingInterval) {
} else {
r.EncodeInt(int64(x.DeschedulingInterval))
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym13 := z.EncBinary()
_ = yym13
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigFile))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("kubeconfigFile"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym14 := z.EncBinary()
_ = yym14
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigFile))
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[4] {
yym16 := z.EncBinary()
_ = yym16
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq2[4] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("policyConfigFile"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym17 := z.EncBinary()
_ = yym17
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile))
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[5] {
yym19 := z.EncBinary()
_ = yym19
if false {
} else {
r.EncodeBool(bool(x.DryRun))
}
} else {
r.EncodeBool(false)
}
} else {
if yyq2[5] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("dryRun"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym20 := z.EncBinary()
_ = yym20
if false {
} else {
r.EncodeBool(bool(x.DryRun))
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[6] {
yym22 := z.EncBinary()
_ = yym22
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.NodeSelector))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq2[6] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("nodeSelector"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym23 := z.EncBinary()
_ = yym23
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.NodeSelector))
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}

func (x *DeschedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym1 := z.DecBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct2 := r.ContainerType()
if yyct2 == codecSelferValueTypeMap1234 {
yyl2 := r.ReadMapStart()
if yyl2 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl2, d)
}
} else if yyct2 == codecSelferValueTypeArray1234 {
yyl2 := r.ReadArrayStart()
if yyl2 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl2, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}

func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys3Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys3Slc
var yyhl3 bool = l >= 0
for yyj3 := 0; ; yyj3++ {
if yyhl3 {
if yyj3 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys3Slc = r.DecodeBytes(yys3Slc, true, true)
yys3 := string(yys3Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys3 {
case "kind":
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
yyv4 := &x.Kind
yym5 := z.DecBinary()
_ = yym5
if false {
} else {
*((*string)(yyv4)) = r.DecodeString()
}
}
case "apiVersion":
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
yyv6 := &x.APIVersion
yym7 := z.DecBinary()
_ = yym7
if false {
} else {
*((*string)(yyv6)) = r.DecodeString()
}
}
case "deschedulingInterval":
if r.TryDecodeAsNil() {
x.DeschedulingInterval = 0
} else {
yyv8 := &x.DeschedulingInterval
yym9 := z.DecBinary()
_ = yym9
if false {
} else if z.HasExtensions() && z.DecExt(yyv8) {
} else {
*((*int64)(yyv8)) = int64(r.DecodeInt(64))
}
}
case "kubeconfigFile":
if r.TryDecodeAsNil() {
x.KubeconfigFile = ""
} else {
yyv10 := &x.KubeconfigFile
yym11 := z.DecBinary()
_ = yym11
if false {
} else {
*((*string)(yyv10)) = r.DecodeString()
}
}
case "policyConfigFile":
if r.TryDecodeAsNil() {
x.PolicyConfigFile = ""
} else {
yyv12 := &x.PolicyConfigFile
yym13 := z.DecBinary()
_ = yym13
if false {
} else {
*((*string)(yyv12)) = r.DecodeString()
}
}
case "dryRun":
if r.TryDecodeAsNil() {
x.DryRun = false
} else {
yyv14 := &x.DryRun
yym15 := z.DecBinary()
_ = yym15
if false {
} else {
*((*bool)(yyv14)) = r.DecodeBool()
}
}
case "nodeSelector":
if r.TryDecodeAsNil() {
x.NodeSelector = ""
} else {
yyv16 := &x.NodeSelector
yym17 := z.DecBinary()
_ = yym17
if false {
} else {
*((*string)(yyv16)) = r.DecodeString()
}
}
default:
z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3
} // end for yyj3
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}

func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj18 int
var yyb18 bool
var yyhl18 bool = l >= 0
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
yyv19 := &x.Kind
yym20 := z.DecBinary()
_ = yym20
if false {
} else {
*((*string)(yyv19)) = r.DecodeString()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
yyv21 := &x.APIVersion
yym22 := z.DecBinary()
_ = yym22
if false {
} else {
*((*string)(yyv21)) = r.DecodeString()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.DeschedulingInterval = 0
} else {
yyv23 := &x.DeschedulingInterval
yym24 := z.DecBinary()
_ = yym24
if false {
} else if z.HasExtensions() && z.DecExt(yyv23) {
} else {
*((*int64)(yyv23)) = int64(r.DecodeInt(64))
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.KubeconfigFile = ""
} else {
yyv25 := &x.KubeconfigFile
yym26 := z.DecBinary()
_ = yym26
if false {
} else {
*((*string)(yyv25)) = r.DecodeString()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.PolicyConfigFile = ""
} else {
yyv27 := &x.PolicyConfigFile
yym28 := z.DecBinary()
_ = yym28
if false {
} else {
*((*string)(yyv27)) = r.DecodeString()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.DryRun = false
} else {
yyv29 := &x.DryRun
yym30 := z.DecBinary()
_ = yym30
if false {
} else {
*((*bool)(yyv29)) = r.DecodeBool()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.NodeSelector = ""
} else {
yyv31 := &x.NodeSelector
yym32 := z.DecBinary()
_ = yym32
if false {
} else {
*((*string)(yyv31)) = r.DecodeString()
}
}
for {
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj18-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
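The generated pair above writes the struct either as a map with lowercase keys or, when StructToArray is set on the handle, as a positional array, and the decoder accepts both container types. The following is a small self-contained sketch of that same behaviour driven through github.com/ugorji/go/codec directly; the Sample type and its tags are hypothetical stand-ins, not descheduler code, and the exact output shown in comments is indicative only.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// Sample is a hypothetical stand-in for the generated type; the json tags
// mirror the lowercase keys ("dryRun", "nodeSelector") used by the map branch.
type Sample struct {
	DryRun       bool   `json:"dryRun,omitempty"`
	NodeSelector string `json:"nodeSelector,omitempty"`
}

func main() {
	var jh codec.JsonHandle

	// Default map form: only set fields are written, mirroring the yyq2 flags.
	var asMap []byte
	if err := codec.NewEncoderBytes(&asMap, &jh).Encode(Sample{DryRun: true}); err != nil {
		panic(err)
	}
	fmt.Println(string(asMap)) // e.g. {"dryRun":true}

	// StructToArray corresponds to the yy2arr2 branch: fields are written
	// positionally instead of keyed by name.
	jh.StructToArray = true
	var asArray []byte
	if err := codec.NewEncoderBytes(&asArray, &jh).Encode(Sample{DryRun: true}); err != nil {
		panic(err)
	}
	fmt.Println(string(asArray)) // e.g. [true,""]

	// The decoder, like CodecDecodeSelf above, accepts either container type.
	var out Sample
	if err := codec.NewDecoderBytes(asArray, &jh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}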