Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00
Compare commits
317 Commits
SHA1:
782058a67c 6ecff2d544 241e3c50f7 345e1f7139 818033a143 54540412b8 e91082a4e9 74150670ed
8662aa75b1 25876a2d2d 28a3011f77 751a0dec8e 932bf0bb42 da42d43c2f 39066a41de 640d4af538
fe135ef841 df30be9f04 6dcb094ada 8849b415fa 3d6c3c82ae 245b8c0f84 f6ef3370cd 6bdefcbeff
9f3411a93e 6f8585493e 2213cb977c bf052b2fed e372b00b9a 1795479cf0 97a0ab922f 81df2c3da8
9e35349cb3 6e1f88b932 a14d4bfba2 0894f7740c c3f07dc366 ca8f1051eb c7692a2e9f 53badf7b61
f801c5f72f 327880ba51 7bb8b4feda 36b1e1f061 4507a90bb6 550f68306c 2b668566ce ee414ea366
5761b5d595 07f476dfc4 f5e9f07321 05c69ee26a 1623e09122 696aa7c505 c53dce0805 cd8b5a0354
b51f24eb8e ae3b4368ee 61eef93618 fa0a2ec6fe 7ece10a643 71c8eae47e 267b0837dc c713537d56
e374229707 f834581a8e 15fcde5229 96c5dd3941 65a03e76bf 43525f6493 7457626f62 08c22e8921
4ff533ec17 7680e3d079 305801dd0e 9951b85d60 ff21ec9432 5e15d77bf2 d833c73fc4 795a80dfb0
f5e4acdd8a eb4c1bb355 6dfa95cc87 eb9d974a8b d41a1f4a56 138ad556a3 37124e6e45 bd412bf87f
6f9b31f568 c858740c4f bfefe634a1 7b7b9e1cd7 0a4b8b0a25 f28183dcbe abdf79454f 46b570b71d
616a9b5f6b 003a4cdc2b 54ea05d8bb d0eea0cabb f0297dfe03 ef1f36f8e4 a733c95dcc 5a81a0661b
83e04960af 2a8dc69cbb 5d82d08af3 c265825166 ed0126fb63 8c7267b379 d85ce22975 a9091a1e37
d3b0ac8e06 435674fb44 eacbae72fd 34550d4b7c c6ff87dbd6 04efe65f90 55afde6251 7039b6c8aa
cff984261e 4819ab9c69 25336da708 4941f6a16b d7e93058d4 c20a595370 eec1104d6e 741b35edf5
c01cfcf3b6 643cd472ef 668d727fc2 423ee35846 31c7855212 211f3942b6 beae282735 635348efb9
fa335c782f b019a58525 78eef6c343 cbcefb5d2f 149085fb57 991eddb691 91de471376 c2d7e22749
e7c42794a0 3a8dfc07ed 077b7f6505 240fa93bc5 6c7f846917 6db7c3b92c 030267107a 1c300a9881
0e9b33b822 36e3d1e703 f53264b613 414554ae5e 150f945592 9a84afece1 e0c101c5ae e3a562aea0
4966e8ee08 d3542d5892 62d04b0fc7 6ecbc85448 b9b1eae6fb e95e42930d 3cabb69014 ad8f90f177
8a62cf1699 a6b54dae99 112684bcb9 682e07c3cd 9593ce16d9 2545c8b031 c9793e7029 4757132452
561b3b67b3 566f33e6ad 83a75bac80 d8772f5685 df510187d6 137b9b72e7 09b4979673 eff8185d7c
83ee94dd08 aae52ac2ee a7c4295c58 7a6e095451 28c17d240d 6dd91b6a22 7d93551c34 853c43737d
d8dab9d134 8dc7b475d9 15a971494e 5364e17c62 fc8c581d7a d7482bd618 d065f9904b fb8cdc10c7
55cf45a6ba e2a23f2848 c567768845 9510891f42 73858beeea c3346e9806 cc92eaa35d 816f8cb682
5f12ade97b 2b68e65238 86d0f3b038 c9f64dfe37 431597dd43 861f057d1b d845040d77 30d05382b6
ca02665d14 2328d89897 ac089fe5ce 00e23dbc07 3401edab53 e3865fcf8e 361aa01c51 c77f240e37
d9a77393cc 44c7eb5285 462bbbbb47 18e3fd3de5 fdf94304d0 ea0ba7d39a 5be355d815 5efec68fd3
b3cc62dac6 3be0a9f80d 906bca0802 d193bc1370 6654aeff99 e3d06d1541 b44d7718b3 531d6ddc49
bbc902b86f 1fdcbcd008 2fdcfc04d5 872953b9cf 9276f0e555 cee12a5019 456110b508 edab9d7fed
7563b5561b 8b210b08f6 a3d33909fa 4b9e732c18 dd54f1a656 11044ed89d e5d4a2eba6 c9e3c63b85
5b1d551ffd b176dd2e77 9ea6aa536e bd2c217010 c42670e1cc 5e25e21ca2 0af97c1b5e 1652ba7976
5af668e89a 26adf87323 0a58cf4535 3116dad75e fb1b5fc690 dc7f9efc19 f1127541aa 9d6b6094cd
e35eb4a0b5 2e6f14103b 17ef1d5e5f 7788d53d0b 5f66ed8401 cd4d09726c 68a106aed0 1931bd6c1a
9e28f0b362 7245a31f52 66a2a87e49 e7ceddf2bc e3d25a9ab4 992e00ecd2 d157a4359b 6a08b5661a
7094c404c9 20c610c65a 934a06381d 674c1db05c dee89a6cc1 60fbaca305 164d2b0729 023a2f2a47
d1c6f3f709 fc1688057a e6e200b93c 5d843d1f08 6c981cc067 22a3a6ea1d 294bddb5e2 0a9d1959e2
19ee5d80b5 14d9e175c2 468e138070 db13b2ac73 40ca53e0a5 35d8367fe5 345dd9cf27 81f471fe05
aa5e8770f5 2690d139c5 cd192ce5fc 048f3fd1e5 a079fd2757
@@ -1,7 +0,0 @@
language: go
go:
- 1.9.1
script:
- hack/verify-gofmt.sh
- make build
- make test-unit
Dockerfile (26 lines)
@@ -1,26 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.9.2

WORKDIR /go/src/github.com/kubernetes-incubator/descheduler
COPY . .
RUN make

FROM scratch

MAINTAINER Avesh Agarwal <avagarwa@redhat.com>

COPY --from=0 /go/src/github.com/kubernetes-incubator/descheduler/_output/bin/descheduler /bin/descheduler

CMD ["/bin/descheduler", "--help"]
@@ -1,20 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM scratch

MAINTAINER Avesh Agarwal <avagarwa@redhat.com>

COPY _output/bin/descheduler /bin/descheduler

CMD ["/bin/descheduler", "--help"]
LICENSE (201 lines)
@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Makefile (54 lines)
@@ -1,54 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.

.PHONY: test

# VERSION is currently based on the last commit
VERSION=`git describe --tags`
COMMIT=`git rev-parse HEAD`
BUILD=`date +%FT%T%z`
LDFLAG_LOCATION=github.com/kubernetes-incubator/descheduler/cmd/descheduler/app

LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"


# IMAGE is the image name of descheduler
# Should this be changed?
IMAGE:=descheduler:$(VERSION)

all: build

build:
	CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler github.com/kubernetes-incubator/descheduler/cmd/descheduler

dev-image: build
	docker build -f Dockerfile.dev -t $(IMAGE) .

image:
	docker build -t $(IMAGE) .

clean:
	rm -rf _output

test-unit:
	./test/run-unit-tests.sh

test-e2e:
	./test/run-e2e-tests.sh

gen:
	./hack/update-codecgen.sh
	./hack/update-generated-conversions.sh
	./hack/update-generated-deep-copies.sh
	./hack/update-generated-defaulters.sh
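The VERSION, COMMIT, and BUILD values above are injected into the app package via `-ldflags` at build time. A quick way to see what a local build would embed (a sketch, assuming a git checkout with at least one tag) is to run the same shell commands the Makefile uses:

```sh
# Sketch: the same commands the Makefile shells out to for its -ldflags values.
$ git describe --tags   # -> version, e.g. v0.4.0-12-gabc1234
$ git rev-parse HEAD    # -> gitCommit
$ date +%FT%T%z         # -> buildDate
```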
OWNERS (8 lines)
@@ -1,8 +0,0 @@
approvers:
- aveshagarwal
- ravisantoshgudimetla
- jayunit100
reviewers:
- aveshagarwal
- ravisantoshgudimetla
- jayunit100
README.md (295 lines)
@@ -1,295 +0,0 @@
[Build Status](https://travis-ci.org/kubernetes-incubator/descheduler)
[Go Report Card](https://goreportcard.com/report/github.com/kubernetes-incubator/descheduler)

# Descheduler for Kubernetes

## Introduction

Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
pod can or cannot be scheduled, are guided by its configurable policy, which comprises a set of
rules called predicates and priorities. The scheduler's decisions are influenced by its view of
the Kubernetes cluster at the point in time when a new pod first appears for scheduling.
Because Kubernetes clusters are very dynamic and their state changes over time, it may be desirable
to move already running pods to other nodes for various reasons:

* Some nodes are under- or over-utilized.
* The original scheduling decision no longer holds true, for example because taints or labels were added to
  or removed from nodes, or pod/node affinity requirements are no longer satisfied.
* Some nodes failed and their pods moved to other nodes.
* New nodes were added to the cluster.

Consequently, there might be several pods scheduled on less desired nodes in a cluster.
The descheduler, based on its policy, finds pods that can be moved and evicts them. Please
note that in the current implementation the descheduler does not schedule replacements for evicted pods,
but relies on the default scheduler for that.
## Build and Run

Build descheduler:

```sh
$ make
```

and run descheduler:

```sh
$ ./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file>
```

For more information about available options run:

```
$ ./_output/bin/descheduler --help
```
## Running Descheduler as a Job Inside of a Pod

The descheduler can be run as a Job inside of a pod. This has the advantage of
being able to run multiple times without needing user intervention.
The descheduler pod is run as a critical pod to avoid being evicted by itself,
or by the kubelet due to an eviction event. Since critical pods are created in
the `kube-system` namespace, the descheduler job and its pod will also be created
in the `kube-system` namespace.
### Create a container image

First, create a simple Docker image using the Dockerfile found in the root directory:

```
$ make dev-image
```

This creates an image based on the binary built in the previous step. To build both the
binary and the image in one step, run the following command:

```
$ make image
```

This eliminates the need to have Go installed locally, as the binary is built
within its own container.
### Create a cluster role

To give the necessary permissions for the descheduler to work in a pod, create a cluster role:

```
$ cat << EOF | kubectl create -f -
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: descheduler-cluster-role
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "watch", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
  resources: ["pods/eviction"]
  verbs: ["create"]
EOF
```

### Create the service account which will be used to run the job:

```
$ kubectl create sa descheduler-sa -n kube-system
```

### Bind the cluster role to the service account:

```
$ kubectl create clusterrolebinding descheduler-cluster-role-binding \
    --clusterrole=descheduler-cluster-role \
    --serviceaccount=kube-system:descheduler-sa
```

### Create a configmap to store descheduler policy

The descheduler policy is created as a ConfigMap in the `kube-system` namespace
so that it can be mounted as a volume inside the pod.

```
$ kubectl create configmap descheduler-policy-configmap \
    -n kube-system --from-file=<path-to-policy-dir/policy.yaml>
```

### Create the job specification (descheduler-job.yaml)

```
apiVersion: batch/v1
kind: Job
metadata:
  name: descheduler-job
  namespace: kube-system
spec:
  parallelism: 1
  completions: 1
  template:
    metadata:
      name: descheduler-pod
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
    spec:
      containers:
      - name: descheduler
        image: descheduler
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume
        command:
        - "/bin/sh"
        - "-ec"
        - |
          /bin/descheduler --policy-config-file /policy-dir/policy.yaml
      restartPolicy: "Never"
      serviceAccountName: descheduler-sa
      volumes:
      - name: policy-volume
        configMap:
          name: descheduler-policy-configmap
```

Please note that the pod template is configured with the critical pod annotation, and
that the policy file is mounted as a volume from the config map.

### Run the descheduler as a job in a pod:

```
$ kubectl create -f descheduler-job.yaml
```
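Once the job has been created, its progress can be checked with standard kubectl commands, for example (a sketch; the `job-name` label is added automatically by the Job controller):

```
$ kubectl get pods -n kube-system -l job-name=descheduler-job
$ kubectl logs -n kube-system job/descheduler-job
```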
## Policy and Strategies

The descheduler's policy is configurable and includes strategies that can be enabled or disabled.
Four strategies, `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`, and `RemovePodsViolatingNodeAffinity`, are currently implemented.
As part of the policy, the parameters associated with the strategies can be configured too.
By default, all strategies are enabled.

### RemoveDuplicates

This strategy makes sure that there is only one pod associated with a Replica Set (RS),
Replication Controller (RC), Deployment, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This situation can arise
if some nodes went down for any reason and their pods were moved to other nodes, leading to
more than one pod associated with, for example, an RS or RC running on the same node. Once the failed nodes
are ready again, this strategy can be enabled to evict those duplicate pods. Currently, there are no
parameters associated with this strategy. To disable this strategy, the policy should look like:

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: false
```
### LowNodeUtilization

This strategy finds nodes that are underutilized and evicts pods, if possible, from other nodes
in the hope that the evicted pods will be recreated and scheduled on these underutilized nodes. The
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.

Underutilization of nodes is determined by a configurable threshold, `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, and number of pods in terms of percentage. If a node's
usage is below the threshold for all of cpu, memory, and number of pods, the node is considered underutilized.
Currently, pods' resource requests are considered for computing node resource utilization.

There is another configurable threshold, `targetThresholds`, that is used to compute the potential nodes
from which pods could be evicted. Any node between the two thresholds, `thresholds` and `targetThresholds`, is
considered appropriately utilized and is not considered for eviction. The threshold `targetThresholds`
can also be configured for cpu, memory, and number of pods in terms of percentage.

These thresholds, `thresholds` and `targetThresholds`, can be tuned to suit your cluster requirements.
An example of the policy for this strategy looks like:

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu" : 50
          "memory": 50
          "pods": 50
```

There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of underutilized nodes
is above the configured value. This can be helpful in large clusters where a few nodes may go
underutilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
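A sketch of a policy that sets this parameter is shown below; it assumes `numberOfNodes` sits alongside `thresholds` and `targetThresholds` under `nodeResourceUtilizationThresholds`, and the values are placeholders:

```
# Sketch only: assumes numberOfNodes is a sibling of thresholds/targetThresholds;
# threshold values and the node count are placeholders.
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50
        numberOfNodes: 3
```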
### RemovePodsViolatingInterPodAntiAffinity

This strategy makes sure that pods violating inter-pod anti-affinity are removed from nodes. For example,
if podA is running on a node, and podB and podC (running on the same node) have anti-affinity rules that
prohibit them from running on the same node as podA, then podA will be evicted from the node so that podB
and podC can run. This issue can arise when the anti-affinity rules for podB and podC are created while
they are already running on the node. Currently, there are no parameters associated with this strategy.
To disable this strategy, the policy should look like:

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: false
```
### RemovePodsViolatingNodeAffinity

This strategy makes sure that pods violating node affinity are removed from nodes. For example, podA was
scheduled on nodeA, which satisfied the node affinity rule `requiredDuringSchedulingIgnoredDuringExecution`
at the time of scheduling, but over time nodeA stops satisfying the rule. If another node, nodeB, is
available that satisfies the node affinity rule, then podA will be evicted from nodeA. The policy file
should look like this:

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
```
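For illustration, a pod using the `requiredDuringSchedulingIgnoredDuringExecution` rule that this strategy re-evaluates might look like the following (hypothetical pod name and node label):

```
# Hypothetical pod whose placement RemovePodsViolatingNodeAffinity would re-check.
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-node-affinity   # placeholder name
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype        # placeholder node label key
            operator: In
            values:
            - ssd
  containers:
  - name: app
    image: nginx
```

If the node's `disktype` label were later removed, the strategy above would consider the pod for eviction, provided another node satisfying the rule is available.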
## Pod Evictions

When the descheduler decides to evict pods from a node, it employs the following general mechanism:

* Critical pods (with the annotation `scheduler.alpha.kubernetes.io/critical-pod`) are never evicted.
* Pods (static, mirrored, or standalone pods) that are not part of an RC, RS, Deployment, or Job are
  never evicted because these pods won't be recreated.
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted.
* Best effort pods are evicted before Burstable and Guaranteed pods.

### Pod Disruption Budget (PDB)

Pods subject to a Pod Disruption Budget (PDB) are not evicted if descheduling would violate that budget.
Pods are evicted via the eviction subresource, which enforces PDBs.
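For context, a minimal PodDisruptionBudget looks like the following (a sketch with placeholder names; `policy/v1beta1` was the PDB API group/version of this era). Because the descheduler evicts through the eviction subresource, evictions that would violate such a budget are refused:

```
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: myapp-pdb          # placeholder name
spec:
  minAvailable: 2          # keep at least 2 pods of the selected set running
  selector:
    matchLabels:
      app: myapp           # placeholder label
```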
## Roadmap

This roadmap is not in any particular order.

* Strategy to consider taints and tolerations
* Consideration of pod affinity
* Strategy to consider pod lifetime
* Strategy to consider number of pending pods
* Integration with cluster autoscaler
* Integration with metrics providers for obtaining real load metrics
* Consideration of the Kubernetes scheduler's predicates


## Compatibility matrix

Descheduler | Supported Kubernetes version
-------------|-----------------------------
0.4 | 1.9+
0.1-0.3 | 1.7-1.8

## Note

This project is under active development and is not intended for production use.
Any API could change at any time without notice. That said, your feedback is
very important and appreciated in making this project more stable and useful.
@@ -1,14 +0,0 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Team to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/

aveshagarwal
ravisantoshgudimetla
@@ -1,60 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package options provides the descheduler flags
package options

import (
	clientset "k8s.io/client-go/kubernetes"

	// install the componentconfig api so we get its defaulting and conversion functions
	"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
	_ "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/install"
	"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
	deschedulerscheme "github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"

	"github.com/spf13/pflag"
)

// DeschedulerServer configuration
type DeschedulerServer struct {
	componentconfig.DeschedulerConfiguration
	Client clientset.Interface
}

// NewDeschedulerServer creates a new DeschedulerServer with default parameters
func NewDeschedulerServer() *DeschedulerServer {
	versioned := v1alpha1.DeschedulerConfiguration{}
	deschedulerscheme.Scheme.Default(&versioned)
	cfg := componentconfig.DeschedulerConfiguration{}
	deschedulerscheme.Scheme.Convert(versioned, &cfg, nil)
	s := DeschedulerServer{
		DeschedulerConfiguration: cfg,
	}
	return &s
}

// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
	fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "time interval between two consecutive descheduler executions")
	fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
	fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
	// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
	fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
	// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
	fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
}
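The flags registered in `AddFlags` above map one-to-one to command-line options. A hypothetical invocation combining them (paths and label values are placeholders) could look like:

```sh
$ ./_output/bin/descheduler \
    --kubeconfig /path/to/kubeconfig \
    --policy-config-file /path/to/policy.yaml \
    --descheduling-interval 10m \
    --node-selector "role=worker" \
    --max-pods-to-evict-per-node 10 \
    --dry-run
```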
@@ -1,61 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package app implements a Server object for running the descheduler.
package app

import (
	"flag"
	"io"

	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler"

	"github.com/golang/glog"
	"github.com/spf13/cobra"

	aflag "k8s.io/apiserver/pkg/util/flag"
	"k8s.io/apiserver/pkg/util/logs"
)

// NewDeschedulerCommand creates a *cobra.Command object with default parameters
func NewDeschedulerCommand(out io.Writer) *cobra.Command {
	s := options.NewDeschedulerServer()
	cmd := &cobra.Command{
		Use:   "descheduler",
		Short: "descheduler",
		Long:  `The descheduler evicts pods which may be bound to less desired nodes`,
		Run: func(cmd *cobra.Command, args []string) {
			logs.InitLogs()
			defer logs.FlushLogs()
			err := Run(s)
			if err != nil {
				glog.Errorf("%v", err)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
	flags.AddGoFlagSet(flag.CommandLine)
	s.AddFlags(flags)
	return cmd
}

func Run(rs *options.DeschedulerServer) error {
	return descheduler.Run(rs)
}
@@ -1,86 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package app

import (
	"fmt"
	"github.com/spf13/cobra"
	"runtime"
	"strings"
)

var (
	// gitCommit is a constant representing the source version that
	// generated this build. It should be set during build via -ldflags.
	gitCommit string
	// version is a constant representing the version tag that
	// generated this build. It should be set during build via -ldflags.
	version string
	// buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ').
	// It should be set during build via -ldflags.
	buildDate string
)

// Info holds the information related to descheduler app version.
type Info struct {
	Major      string `json:"major"`
	Minor      string `json:"minor"`
	GitCommit  string `json:"gitCommit"`
	GitVersion string `json:"gitVersion"`
	BuildDate  string `json:"buildDate"`
	GoVersion  string `json:"goVersion"`
	Compiler   string `json:"compiler"`
	Platform   string `json:"platform"`
}

// Get returns the overall codebase version. It's for detecting
// what code a binary was built from.
func Get() Info {
	majorVersion, minorVersion := splitVersion(version)
	return Info{
		Major:      majorVersion,
		Minor:      minorVersion,
		GitCommit:  gitCommit,
		GitVersion: version,
		BuildDate:  buildDate,
		GoVersion:  runtime.Version(),
		Compiler:   runtime.Compiler,
		Platform:   fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
	}
}

func NewVersionCommand() *cobra.Command {
	var versionCmd = &cobra.Command{
		Use:   "version",
		Short: "Version of descheduler",
		Long:  `Prints the version of descheduler.`,
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("Descheduler version %+v\n", Get())
		},
	}
	return versionCmd
}

// splitVersion splits the git version to generate major and minor versions needed.
func splitVersion(version string) (string, string) {
	if version == "" {
		return "", ""
	}
	// A sample version would be of form v0.1.0-7-ge884046, so split at first '.' and
	// then return 0 and 1+(+ appended to follow semver convention) for major and minor versions.
	return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
}
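As the comment above notes, a git-describe version such as `v0.4.0-7-ge884046` is split on the first `.`. A small standalone sketch (mirroring the helper above, with a hypothetical example value) shows the resulting major/minor pair:

```go
package main

import (
	"fmt"
	"strings"
)

// splitVersion mirrors the helper above: "v0.4.0-7-ge884046" -> ("0", "4+").
func splitVersion(version string) (string, string) {
	if version == "" {
		return "", ""
	}
	return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
}

func main() {
	major, minor := splitVersion("v0.4.0-7-ge884046")
	fmt.Println(major, minor) // prints: 0 4+
}
```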
@@ -1,36 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app"
)

func main() {
	out := os.Stdout
	cmd := app.NewDeschedulerCommand(out)
	cmd.AddCommand(app.NewVersionCommand())
	flag.CommandLine.Parse([]string{})
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
@@ -1,3 +0,0 @@
# Kubernetes Community Code of Conduct

Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
@@ -1,8 +0,0 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
@@ -1,19 +0,0 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: true
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: true
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu" : 50
          "memory": 50
          "pods": 50
glide.lock (generated, 464 lines)
@@ -1,464 +0,0 @@
hash: ed51a8e643db6e9996ef0ffca671fb31ab5b7fe0d61ecdda828192871f9da366
updated: 2018-05-22T18:05:00.26435-07:00
|
||||
- pkg/apis/autoscaling/v1
|
||||
- pkg/apis/autoscaling/v2beta1
|
||||
- pkg/apis/batch
|
||||
- pkg/apis/batch/install
|
||||
- pkg/apis/batch/v1
|
||||
- pkg/apis/batch/v1beta1
|
||||
- pkg/apis/batch/v2alpha1
|
||||
- pkg/apis/certificates
|
||||
- pkg/apis/certificates/install
|
||||
- pkg/apis/certificates/v1beta1
|
||||
- pkg/apis/componentconfig
|
||||
- pkg/apis/componentconfig/install
|
||||
- pkg/apis/componentconfig/v1alpha1
|
||||
- pkg/apis/core
|
||||
- pkg/apis/core/helper
|
||||
- pkg/apis/core/install
|
||||
- pkg/apis/core/v1
|
||||
- pkg/apis/core/v1/helper
|
||||
- pkg/apis/core/v1/helper/qos
|
||||
- pkg/apis/events
|
||||
- pkg/apis/events/install
|
||||
- pkg/apis/events/v1beta1
|
||||
- pkg/apis/extensions
|
||||
- pkg/apis/extensions/install
|
||||
- pkg/apis/extensions/v1beta1
|
||||
- pkg/apis/imagepolicy
|
||||
- pkg/apis/imagepolicy/install
|
||||
- pkg/apis/imagepolicy/v1alpha1
|
||||
- pkg/apis/networking
|
||||
- pkg/apis/networking/install
|
||||
- pkg/apis/networking/v1
|
||||
- pkg/apis/policy
|
||||
- pkg/apis/policy/install
|
||||
- pkg/apis/policy/v1beta1
|
||||
- pkg/apis/rbac
|
||||
- pkg/apis/rbac/install
|
||||
- pkg/apis/rbac/v1
|
||||
- pkg/apis/rbac/v1alpha1
|
||||
- pkg/apis/rbac/v1beta1
|
||||
- pkg/apis/scheduling
|
||||
- pkg/apis/scheduling/install
|
||||
- pkg/apis/scheduling/v1alpha1
|
||||
- pkg/apis/settings
|
||||
- pkg/apis/settings/install
|
||||
- pkg/apis/settings/v1alpha1
|
||||
- pkg/apis/storage
|
||||
- pkg/apis/storage/install
|
||||
- pkg/apis/storage/v1
|
||||
- pkg/apis/storage/v1alpha1
|
||||
- pkg/apis/storage/v1beta1
|
||||
- pkg/features
|
||||
- pkg/kubelet/apis
|
||||
- pkg/kubelet/types
|
||||
- pkg/master/ports
|
||||
- pkg/util/parsers
|
||||
- pkg/util/pointer
|
||||
- plugin/pkg/scheduler/algorithm/priorities/util
|
||||
testImports: []
|
||||
25
glide.yaml
25
glide.yaml
@@ -1,25 +0,0 @@
|
||||
package: github.com/kubernetes-incubator/descheduler
|
||||
import:
|
||||
- package: k8s.io/client-go
|
||||
version: 78700dec6369ba22221b72770783300f143df150
|
||||
- package: k8s.io/api
|
||||
version: af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a
|
||||
- package: k8s.io/apiserver
|
||||
version: 91e14f394e4796abf5a994a349a222e7081d86b6
|
||||
- package: k8s.io/apimachinery
|
||||
version: 180eddb345a5be3a157cea1c624700ad5bd27b8f
|
||||
- package: k8s.io/kubernetes
|
||||
version: v1.9.0
|
||||
- package: k8s.io/code-generator
|
||||
version: kubernetes-1.9.0
|
||||
- package: github.com/kubernetes/repo-infra
|
||||
- package: github.com/spf13/cobra
|
||||
version: f62e98d28ab7ad31d707ba837a966378465c7b57
|
||||
- package: k8s.io/gengo
|
||||
- package: github.com/ugorji/go
|
||||
version: v.1.1-beta
|
||||
- package: github.com/Azure/go-autorest
|
||||
version: e14a70c556c8e0db173358d1a903dca345a8e75e
|
||||
- package: golang.org/x/tools
|
||||
subpackages:
|
||||
- imports
|
||||
@@ -1,16 +0,0 @@
|
||||
/*
|
||||
Copyright YEAR The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
@@ -1,95 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
echo "Make sure that uuid package is installed"
|
||||
|
||||
master_uuid=$(uuid)
|
||||
node1_uuid=$(uuid)
|
||||
node2_uuid=$(uuid)
|
||||
kube_apiserver_port=6443
|
||||
kube_version=1.11.1
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/../../
|
||||
E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
|
||||
|
||||
|
||||
create_cluster() {
|
||||
echo "#################### Creating instances ##########################"
|
||||
gcloud compute instances create descheduler-$master_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
# Keeping the --zone here so as to make sure that e2e's can run locally.
|
||||
echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
gcloud compute instances create descheduler-$node1_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
gcloud compute instances create descheduler-$node2_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
# Delete the firewall port created for master.
|
||||
echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
chmod 755 $E2E_GCE_HOME/delete_cluster.sh
|
||||
}
|
||||
|
||||
|
||||
generate_kubeadm_instance_files() {
|
||||
# TODO: Check if they have come up. awk $6 contains the state(RUNNING or not).
|
||||
master_public_ip=$(gcloud compute instances list | grep $master_uuid|awk '{print $5}')
|
||||
node1_public_ip=$(gcloud compute instances list | grep $node1_uuid|awk '{print $5}')
|
||||
node2_public_ip=$(gcloud compute instances list | grep $node2_uuid|awk '{print $5}')
|
||||
echo "kubeadm init --kubernetes-version=${kube_version} --apiserver-advertise-address=${master_public_ip}" --ignore-preflight-errors=all --pod-network-cidr=10.96.0.0/12 > $E2E_GCE_HOME/kubeadm_install.sh
|
||||
}
|
||||
|
||||
|
||||
transfer_install_files() {
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
|
||||
}
|
||||
|
||||
|
||||
install_kube() {
|
||||
# Docker installation.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
# kubeadm installation.
|
||||
# 1. Transfer files to master, nodes.
|
||||
transfer_install_files
|
||||
# 2. Install kubeadm.
|
||||
#TODO: Add rm /tmp/kubeadm_install.sh
|
||||
# Open port for kube API server
|
||||
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
|
||||
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b|grep 'kubeadm join')
|
||||
|
||||
# Copy the kubeconfig file onto /tmp for e2e tests.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b
|
||||
gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b
|
||||
|
||||
# Postinstall on master, need to add a network plugin for kube-dns to come to running state.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b
|
||||
echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh
|
||||
|
||||
# Copy kubeadm_join to every node.
|
||||
#TODO: Put these in a loop, so that extension becomes possible.
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
|
||||
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
|
||||
|
||||
}
|
||||
|
||||
|
||||
create_cluster
|
||||
|
||||
generate_kubeadm_instance_files
|
||||
|
||||
install_kube
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
|
||||
gcloud auth activate-service-account --key-file "${GCE_SA_CREDS}"
|
||||
gcloud config set project $GCE_PROJECT_ID
|
||||
gcloud config set compute/zone $GCE_ZONE
|
||||
@@ -1,9 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
wget https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-176.0.0-linux-x86_64.tar.gz
|
||||
|
||||
tar -xvzf google-cloud-sdk-176.0.0-linux-x86_64.tar.gz
|
||||
|
||||
./google-cloud-sdk/install.sh -q
|
||||
@@ -1,11 +0,0 @@
|
||||
apt-get update
|
||||
apt-get install -y docker.io
|
||||
|
||||
apt-get update && apt-get install -y apt-transport-https
|
||||
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
|
||||
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
|
||||
deb http://apt.kubernetes.io/ kubernetes-xenial main
|
||||
EOF
|
||||
apt-get update
|
||||
apt-get install -y kubelet kubeadm kubectl
|
||||
exit 0
|
||||
@@ -1,47 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
|
||||
# os::util::absolute_path returns the absolute path to the directory provided
|
||||
function os::util::absolute_path() {
|
||||
local relative_path="$1"
|
||||
local absolute_path
|
||||
|
||||
pushd "${relative_path}" >/dev/null
|
||||
relative_path="$( pwd )"
|
||||
if [[ -h "${relative_path}" ]]; then
|
||||
absolute_path="$( readlink "${relative_path}" )"
|
||||
else
|
||||
absolute_path="${relative_path}"
|
||||
fi
|
||||
popd >/dev/null
|
||||
|
||||
echo "${absolute_path}"
|
||||
}
|
||||
readonly -f os::util::absolute_path
|
||||
|
||||
# find the absolute path to the root of the Origin source tree
|
||||
init_source="$( dirname "${BASH_SOURCE}" )/../.."
|
||||
OS_ROOT="$( os::util::absolute_path "${init_source}" )"
|
||||
export OS_ROOT
|
||||
cd "${OS_ROOT}"
|
||||
|
||||
PRJ_PREFIX="github.com/${REPO_ORG:-kubernetes-incubator}/descheduler"
|
||||
OS_OUTPUT_BINPATH="${OS_ROOT}/_output/bin"
|
||||
@@ -1,151 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
# The sort at the end makes sure we feed the topological sort a deterministic
|
||||
# list (since there aren't many dependencies).
|
||||
|
||||
generated_files=($(
|
||||
find . -not \( \
|
||||
\( \
|
||||
-wholename './output' \
|
||||
-o -wholename './_output' \
|
||||
-o -wholename './staging' \
|
||||
-o -wholename './release' \
|
||||
-o -wholename './target' \
|
||||
-o -wholename '*/third_party/*' \
|
||||
-o -wholename '*/vendor/*' \
|
||||
-o -wholename '*/codecgen-*-1234.generated.go' \
|
||||
\) -prune \
|
||||
\) -name '*.generated.go' | LC_ALL=C sort -r
|
||||
))
|
||||
|
||||
# We only work for deps within this prefix.
|
||||
#my_prefix="k8s.io/kubernetes"
|
||||
my_prefix="github.com/${REPO_ORG:-kubernetes-incubator}/descheduler"
|
||||
|
||||
# Register function to be called on EXIT to remove codecgen
|
||||
# binary and also to touch the files that should be regenerated
|
||||
# since they are first removed.
|
||||
# This is necessary to make the script work after previous failure.
|
||||
function cleanup {
|
||||
rm -f "${CODECGEN:-}"
|
||||
pushd "${KUBE_ROOT}" > /dev/null
|
||||
for (( i=0; i < number; i++ )); do
|
||||
touch "${generated_files[${i}]}" || true
|
||||
done
|
||||
popd > /dev/null
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
# Precompute dependencies for all directories.
|
||||
# Then sort all files in the dependency order.
|
||||
number=${#generated_files[@]}
|
||||
result=""
|
||||
for (( i=0; i<number; i++ )); do
|
||||
visited[${i}]=false
|
||||
file="${generated_files[${i}]/\.generated\.go/.go}"
|
||||
deps[${i}]=$(go list -f '{{range .Deps}}{{.}}{{"\n"}}{{end}}' ${file} | grep "^${my_prefix}")
|
||||
done
|
||||
###echo "DBG: found $number generated files"
|
||||
###for f in $(echo "${generated_files[@]}" | LC_ALL=C sort); do
|
||||
### echo "DBG: $f"
|
||||
###done
|
||||
|
||||
# NOTE: depends function assumes that the whole repository is under
|
||||
# $my_prefix - it will NOT work if that is not true.
|
||||
function depends {
|
||||
rhs="$(dirname ${generated_files[$2]/#./${my_prefix}})"
|
||||
###echo "DBG: does ${file} depend on ${rhs}?"
|
||||
for dep in ${deps[$1]}; do
|
||||
###echo "DBG: checking against $dep"
|
||||
if [[ "${dep}" == "${rhs}" ]]; then
|
||||
###echo "DBG: = yes"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
###echo "DBG: = no"
|
||||
return 1
|
||||
}
|
||||
|
||||
function tsort {
|
||||
visited[$1]=true
|
||||
local j=0
|
||||
for (( j=0; j<number; j++ )); do
|
||||
if ! ${visited[${j}]}; then
|
||||
if depends "$1" ${j}; then
|
||||
tsort $j
|
||||
fi
|
||||
fi
|
||||
done
|
||||
result="${result} $1"
|
||||
}
|
||||
echo "Building dependencies"
|
||||
for (( i=0; i<number; i++ )); do
|
||||
###echo "DBG: considering ${generated_files[${i}]}"
|
||||
if ! ${visited[${i}]}; then
|
||||
###echo "DBG: tsorting ${generated_files[${i}]}"
|
||||
tsort ${i}
|
||||
fi
|
||||
done
|
||||
index=(${result})
|
||||
|
||||
haveindex=${index:-}
|
||||
if [[ -z ${haveindex} ]]; then
|
||||
echo No files found for $0
|
||||
echo A previous run of $0 may have deleted all the files and then crashed.
|
||||
echo Use 'touch' to create files named 'types.generated.go' listed as deleted in 'git status'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Building codecgen"
|
||||
CODECGEN="${PWD}/codecgen_binary"
|
||||
go build -o "${CODECGEN}" ./vendor/github.com/ugorji/go/codec/codecgen
|
||||
|
||||
# Running codecgen fails if some of the files doesn't compile.
|
||||
# Thus (since all the files are completely auto-generated and
|
||||
# not required for the code to be compilable, we first remove
|
||||
# them and the regenerate them.
|
||||
for (( i=0; i < number; i++ )); do
|
||||
rm -f "${generated_files[${i}]}"
|
||||
done
|
||||
|
||||
# Generate files in the dependency order.
|
||||
for current in "${index[@]}"; do
|
||||
generated_file=${generated_files[${current}]}
|
||||
initial_dir=${PWD}
|
||||
file=${generated_file/\.generated\.go/.go}
|
||||
echo "processing ${file}"
|
||||
# codecgen work only if invoked from directory where the file
|
||||
# is located.
|
||||
pushd "$(dirname ${file})" > /dev/null
|
||||
base_file=$(basename "${file}")
|
||||
base_generated_file=$(basename "${generated_file}")
|
||||
# We use '-d 1234' flag to have a deterministic output every time.
|
||||
# The constant was just randomly chosen.
|
||||
###echo "DBG: running ${CODECGEN} -d 1234 -o ${base_generated_file} ${base_file}"
|
||||
${CODECGEN} -d 1234 -o "${base_generated_file}" "${base_file}"
|
||||
# Add boilerplate at the beginning of the generated file.
|
||||
sed 's/YEAR/2017/' "${initial_dir}/hack/boilerplate/boilerplate.go.txt" > "${base_generated_file}.tmp"
|
||||
cat "${base_generated_file}" >> "${base_generated_file}.tmp"
|
||||
mv "${base_generated_file}.tmp" "${base_generated_file}"
|
||||
popd > /dev/null
|
||||
done
|
||||
@@ -1,9 +0,0 @@
|
||||
#!/bin/bash
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "${PRJ_PREFIX}/vendor/k8s.io/code-generator/cmd/conversion-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/conversion-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.conversion
|
||||
@@ -1,10 +0,0 @@
|
||||
#!/bin/bash
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "${PRJ_PREFIX}/vendor/k8s.io/code-generator/cmd/deepcopy-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/deepcopy-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig,${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.deepcopy
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
#!/bin/bash
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "${PRJ_PREFIX}/vendor/k8s.io/code-generator/cmd/defaulter-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/defaulter-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.defaults
|
||||
@@ -1,54 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "${DESCHEDULER_ROOT}"
|
||||
|
||||
find_files() {
|
||||
find . -not \( \
|
||||
\( \
|
||||
-wholename './output' \
|
||||
-o -wholename './_output' \
|
||||
-o -wholename './release' \
|
||||
-o -wholename './target' \
|
||||
-o -wholename './.git' \
|
||||
-o -wholename '*/third_party/*' \
|
||||
-o -wholename '*/Godeps/*' \
|
||||
-o -wholename '*/vendor/*' \
|
||||
\) -prune \
|
||||
\) -name '*.go'
|
||||
}
|
||||
|
||||
GOFMT="gofmt -s"
|
||||
bad_files=$(find_files | xargs $GOFMT -l)
|
||||
if [[ -n "${bad_files}" ]]; then
|
||||
echo "!!! '$GOFMT' needs to be run on the following files: "
|
||||
echo "${bad_files}"
|
||||
exit 1
|
||||
fi
|
||||
762
index.yaml
Normal file
762
index.yaml
Normal file
@@ -0,0 +1,762 @@
|
||||
apiVersion: v1
|
||||
entries:
|
||||
descheduler:
|
||||
- apiVersion: v1
|
||||
appVersion: 0.34.0
|
||||
created: "2025-10-30T16:41:34.519386946Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 0fb91a600418400c8581eae65adab602ff2589104540306e5c6936a50fdd5bd6
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: sig-scheduling@kubernetes.io
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.34.0/descheduler-0.34.0.tgz
|
||||
version: 0.34.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.33.0
|
||||
created: "2025-05-04T19:49:46.589102937Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 1f32da32dedc6c066f197637e1fbd10546e9c6987322b88e71add1c9765c978f
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: sig-scheduling@kubernetes.io
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.33.0/descheduler-0.33.0.tgz
|
||||
version: 0.33.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.32.2
|
||||
created: "2025-02-11T04:46:21.248724497Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 011205c924597543fe93bf6ce66d75a33bc6a7d1a029eedd4c0390a16e57e264
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.32.2/descheduler-0.32.2.tgz
|
||||
version: 0.32.2
|
||||
- apiVersion: v1
|
||||
appVersion: 0.32.1
|
||||
created: "2025-01-06T23:08:49.134597641Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: e104f5e14b8ef7f5166912e1e9a61785977f5f17ca95c43e744d0685528f8436
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.32.1/descheduler-0.32.1.tgz
|
||||
version: 0.32.1
|
||||
- apiVersion: v1
|
||||
appVersion: 0.32.0
|
||||
created: "2025-01-02T23:34:02.843431016Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 4ca12ecc4916ae1c47ae9b03571e371766e6b6846cc1821299bbd2da75042bb3
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.32.0/descheduler-0.32.0.tgz
|
||||
version: 0.32.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.31.0
|
||||
created: "2024-09-09T22:59:21.130329884Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 7e9d95713e2384b0976e6ea89742550c83c9adf52005e9c0cc9da3f04477d530
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.31.0/descheduler-0.31.0.tgz
|
||||
version: 0.31.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.30.2
|
||||
created: "2024-11-20T15:09:49.215945303Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 7608d3966b7d1fc08fe31cdffc283dc2a4d7a473fe63efa1672a5926b81774c2
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.30.2/descheduler-0.30.2.tgz
|
||||
version: 0.30.2
|
||||
- apiVersion: v1
|
||||
appVersion: 0.30.1
|
||||
created: "2024-06-05T12:06:23.870221344Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 8ab41d27011119ff62abfb86fa9253d66a1c74b7247d32c5786a96d3fd11abd2
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.30.1/descheduler-0.30.1.tgz
|
||||
version: 0.30.1
|
||||
- apiVersion: v1
|
||||
appVersion: 0.30.0
|
||||
created: "2024-05-20T14:03:13.843460018Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 7599edc8d0c09821e290438a850509d762a6ad79d48024ab5c9ec56e70cc4229
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.30.0/descheduler-0.30.0.tgz
|
||||
version: 0.30.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.29.0
|
||||
created: "2024-01-02T18:54:20.992594156Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: f6157f5d02bf90e8f84e74a8d7ccb1660b802d4aa710fdb32d9398eaccd50f67
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.29.0/descheduler-0.29.0.tgz
|
||||
version: 0.29.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.28.1
|
||||
created: "2023-11-29T17:22:04.773831615Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 33ccfc268727e9b129254bc450c192d3084d56967ceb315e4a1d6d744bc204ea
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/assets/logo/descheduler-stacked-color.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.28.1/descheduler-0.28.1.tgz
|
||||
version: 0.28.1
|
||||
- apiVersion: v1
|
||||
appVersion: 0.28.0
|
||||
created: "2023-08-24T13:07:51.303386087Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 50efd424df52a1bfd6e40900e634394e66db4a901ff797b595481855dcb01584
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.28.0/descheduler-0.28.0.tgz
|
||||
version: 0.28.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.27.1
|
||||
created: "2023-05-31T08:18:14.881530101Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: c7d93a7fc65f8311bd1853893bd4c5b3f3f4dabcf182c896846752d99720c98b
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.27.1/descheduler-0.27.1.tgz
|
||||
version: 0.27.1
|
||||
- apiVersion: v1
|
||||
appVersion: 0.27.0
|
||||
created: "2023-05-05T14:14:44.725587487Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: dd06be2789590c070b204e0f7f0910ec111d90a13e8b5a46e3b80b015e458b11
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.27.0/descheduler-0.27.0.tgz
|
||||
version: 0.27.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.26.1
|
||||
created: "2023-04-04T17:34:17.284123888Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 391167e6741cc412bc12bc0e5a6ab513a63528fa4d161396e2c14f017eb1b7b3
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.26.1/descheduler-0.26.1.tgz
|
||||
version: 0.26.1
|
||||
- apiVersion: v1
|
||||
appVersion: 0.26.0
|
||||
created: "2023-01-17T14:42:00.271072093Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 4bb1285dcb65814059b13d268d0f230a0993527fddc652a0b82a2039aed7a1b4
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.26.0/descheduler-0.26.0.tgz
|
||||
version: 0.26.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.25.1
|
||||
created: "2022-10-17T21:53:53.644159593Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 68b973e5294bcc1735ec7cfb9c82769c4dccc4a2ef75d8cf2a3244ebaaa73b5d
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.25.2/descheduler-0.25.2.tgz
|
||||
version: 0.25.2
|
||||
- apiVersion: v1
|
||||
appVersion: 0.25.1
|
||||
created: "2022-09-27T15:02:20.445257034Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 758d970bf9fc7d575f3a171affd3a60475dafba6405cfcfd98d42062582a75ec
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.25.1/descheduler-0.25.1.tgz
|
||||
version: 0.25.1
|
||||
- apiVersion: v1
|
||||
appVersion: 0.25.0
|
||||
created: "2022-09-15T16:33:37.881987127Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 8aa3f1af4a9f3ab49dd9c177c88596993d1a1481b83318ebeff3a49e49714bdd
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.25.0/descheduler-0.25.0.tgz
|
||||
version: 0.25.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.24.1
|
||||
created: "2022-05-31T13:39:20.603755914Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 3cec16a47c2416e5f5946eae473ce7fd073dfeb2ca49fa87e900886c57707fcf
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.24.1/descheduler-0.24.1.tgz
|
||||
version: 0.24.1
|
||||
- apiVersion: v1
|
||||
appVersion: 0.24.0
|
||||
created: "2022-05-23T14:14:10.037216155Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 2cd39048ebca1bf37d99be4f659364bedaa5c6e6cfb62640914e327884945b24
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.24.0/descheduler-0.24.0.tgz
|
||||
version: 0.24.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.23.1
|
||||
created: "2022-02-28T18:56:10.944563703Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 7c6c08ca5d1a0be8632e3596d721f5fc99117e3a422642cb908f5a2d2e355059
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.23.2/descheduler-0.23.2.tgz
|
||||
version: 0.23.2
|
||||
- apiVersion: v1
|
||||
appVersion: 0.23.0
|
||||
created: "2022-02-09T13:34:50.093591254Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: a862faecae9eae025558d69716ebd67e467cc0316a431487d9f64dd8a60b7848
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.23.1/descheduler-0.23.1.tgz
|
||||
version: 0.23.1
|
||||
- apiVersion: v1
|
||||
appVersion: 0.23.0
|
||||
created: "2022-02-03T20:23:32.21492446Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 224b16599938ee331612c01dd5889a88a3ed3ef1872a9e108621aba3fb608713
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.23.0/descheduler-0.23.0.tgz
|
||||
version: 0.23.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.22.1
|
||||
created: "2021-09-29T16:37:16.381510827Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 83a407bbf981c300b6c75df148045be56797517cfcf7b744dfe8cbd3aa946c56
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.22.1/descheduler-0.22.1.tgz
|
||||
version: 0.22.1
|
||||
- apiVersion: v1
|
||||
appVersion: 0.22.0
|
||||
created: "2021-09-08T20:44:21.974382529Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: 9dd645e058c66eba711acd604026dbbb546ccd5366253464bcea38d63c51ed69
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
- email: kubernetes-sig-scheduling@googlegroups.com
|
||||
name: Kubernetes SIG Scheduling
|
||||
name: descheduler
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
urls:
|
||||
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.22.0/descheduler-0.22.0.tgz
|
||||
version: 0.22.0
|
||||
- apiVersion: v1
|
||||
appVersion: 0.21.0
|
||||
created: "2021-06-08T17:43:48.212056769Z"
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
|
||||
pods that can potentially be scheduled on better nodes. In the current implementation,
|
||||
descheduler does not schedule replacement of evicted pods but relies on the
|
||||
default scheduler for that.
|
||||
digest: bb951c9c5e2c76855e0bb1a3a20c5238963acd515d7c250e3b27ccefa002bec2
|
||||
home: https://github.com/kubernetes-sigs/descheduler
|
||||
icon: https://kubernetes.io/images/favicon.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- descheduler
|
||||
- kube-scheduler
|
||||
maintainers:
|
||||
    - email: kubernetes-sig-scheduling@googlegroups.com
      name: Kubernetes SIG Scheduling
    name: descheduler
    sources:
    - https://github.com/kubernetes-sigs/descheduler
    urls:
    - https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.21.0/descheduler-0.21.0.tgz
    version: 0.21.0
  - apiVersion: v1
    appVersion: 0.20.0
    created: "2020-12-10T15:23:17.367104788Z"
    description: Descheduler for Kubernetes is used to rebalance clusters by evicting
      pods that can potentially be scheduled on better nodes. In the current implementation,
      descheduler does not schedule replacement of evicted pods but relies on the
      default scheduler for that.
    digest: f8f0177966994e3cdb61385069ebba3f8f658107ab800a382b30d5fe809789d0
    home: https://github.com/kubernetes-sigs/descheduler
    icon: https://kubernetes.io/images/favicon.png
    keywords:
    - kubernetes
    - descheduler
    - kube-scheduler
    maintainers:
    - email: kubernetes-sig-scheduling@googlegroups.com
      name: Kubernetes SIG Scheduling
    name: descheduler
    sources:
    - https://github.com/kubernetes-sigs/descheduler
    urls:
    - https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.20.0/descheduler-0.20.0.tgz
    version: 0.20.0
  - apiVersion: v1
    appVersion: 0.19.0
    created: "2020-12-09T01:47:49.250755956Z"
    description: Descheduler for Kubernetes is used to rebalance clusters by evicting
      pods that can potentially be scheduled on better nodes. In the current implementation,
      descheduler does not schedule replacement of evicted pods but relies on the
      default scheduler for that.
    digest: 1331ce7da686311e18e552edd8353786694049a747ccab6616362e892579e766
    home: https://github.com/kubernetes-sigs/descheduler
    icon: https://kubernetes.io/images/favicon.png
    keywords:
    - kubernetes
    - descheduler
    - kube-scheduler
    maintainers:
    - email: kubernetes-sig-scheduling@googlegroups.com
      name: Kubernetes SIG Scheduling
    name: descheduler
    sources:
    - https://github.com/kubernetes-sigs/descheduler
    urls:
    - https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.19.2/descheduler-0.19.2.tgz
    version: 0.19.2
  - apiVersion: v1
    appVersion: 0.18.0
    created: "2020-12-08T19:34:02.786640856Z"
    description: Descheduler for Kubernetes is used to rebalance clusters by evicting
      pods that can potentially be scheduled on better nodes. In the current implementation,
      descheduler does not schedule replacement of evicted pods but relies on the
      default scheduler for that.
    digest: 55b8b8e015bfa3d0794ca1d7316b2b412211fa6ccbd7649a80f7a1d6ec097f26
    home: https://github.com/kubernetes-sigs/descheduler
    icon: https://kubernetes.io/images/favicon.png
    keywords:
    - kubernetes
    - descheduler
    - kube-scheduler
    maintainers:
    - email: kubernetes-sig-scheduling@googlegroups.com
      name: Kubernetes SIG Scheduling
    name: descheduler
    sources:
    - https://github.com/kubernetes-sigs/descheduler
    urls:
    - https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.18.2/descheduler-0.18.2.tgz
    version: 0.18.2
  descheduler-helm-chart:
  - apiVersion: v1
    appVersion: 0.19.0
    created: "2020-12-08T19:22:08.294224978Z"
    deprecated: true
    description: DEPRECATED - Descheduler for Kubernetes is used to rebalance clusters
      by evicting pods that can potentially be scheduled on better nodes. In the current
      implementation, descheduler does not schedule replacement of evicted pods but
      relies on the default scheduler for that.
    digest: b77ac9bb15ec71f7006d7d4faefffbb29b0cbcdabbef6389c858690beb12e529
    home: https://github.com/kubernetes-sigs/descheduler
    icon: https://kubernetes.io/images/favicon.png
    keywords:
    - kubernetes
    - descheduler
    - kube-scheduler
    name: descheduler-helm-chart
    sources:
    - https://github.com/kubernetes-sigs/descheduler
    urls:
    - https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.19.1/descheduler-helm-chart-0.19.1.tgz
    version: 0.19.1
  - apiVersion: v1
    appVersion: 0.19.0
    created: "2020-09-01T16:59:22.515242595Z"
    description: Descheduler for Kubernetes is used to rebalance clusters by evicting
      pods that can potentially be scheduled on better nodes. In the current implementation,
      descheduler does not schedule replacement of evicted pods but relies on the
      default scheduler for that.
    digest: dc70fc1ac8b57872e54c3406000617d4cbd0b2a9653abf0054c345c471b4be1c
    home: https://github.com/kubernetes-sigs/descheduler
    icon: https://kubernetes.io/images/favicon.png
    keywords:
    - kubernetes
    - descheduler
    - kube-scheduler
    maintainers:
    - email: kubernetes-sig-scheduling@googlegroups.com
      name: Kubernetes SIG Scheduling
    name: descheduler-helm-chart
    sources:
    - https://github.com/kubernetes-sigs/descheduler
    urls:
    - https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.19.0/descheduler-helm-chart-0.19.0.tgz
    version: 0.19.0
  - apiVersion: v1
    appVersion: 0.18.0
    created: "2020-07-23T18:29:32.622241657Z"
    description: Descheduler for Kubernetes is used to rebalance clusters by evicting
      pods that can potentially be scheduled on better nodes. In the current implementation,
      descheduler does not schedule replacement of evicted pods but relies on the
      default scheduler for that.
    digest: d2a7516d4b2bbd288d5016b935d1a0ce4db9cc40c4f8a6981fe5075d71f4bcda
    home: https://github.com/kubernetes-sigs/descheduler
    icon: https://kubernetes.io/images/favicon.png
    keywords:
    - kubernetes
    - descheduler
    - kube-scheduler
    maintainers:
    - email: steve.hipwell@github.com
      name: stevehipwell
    name: descheduler-helm-chart
    sources:
    - https://github.com/kubernetes-sigs/descheduler
    urls:
    - https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.18.1/descheduler-helm-chart-0.18.1.tgz
    version: 0.18.1
generated: "2025-10-30T16:41:34.519616538Z"
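The index above is ordinary Helm repository metadata, so it can also be consumed programmatically. Below is a minimal Go sketch of reading such an index to list the published descheduler chart versions; it assumes the sigs.k8s.io/yaml helper package and a hypothetical local copy of the file named index.yaml, and it only models the fields needed here.

package main

import (
	"fmt"
	"os"

	"sigs.k8s.io/yaml" // assumed YAML-to-JSON helper commonly used in the Kubernetes ecosystem
)

// chartEntry mirrors only the index.yaml fields this sketch needs.
type chartEntry struct {
	AppVersion string   `json:"appVersion"`
	Version    string   `json:"version"`
	Deprecated bool     `json:"deprecated"`
	URLs       []string `json:"urls"`
}

// index models the top-level repository index: chart name -> list of releases.
type index struct {
	Entries map[string][]chartEntry `json:"entries"`
}

func main() {
	raw, err := os.ReadFile("index.yaml") // hypothetical local copy of the repository index
	if err != nil {
		panic(err)
	}
	var idx index
	if err := yaml.Unmarshal(raw, &idx); err != nil {
		panic(err)
	}
	for _, e := range idx.Entries["descheduler"] {
		fmt.Printf("chart %s (app %s, deprecated=%v) -> %v\n", e.Version, e.AppVersion, e.Deprecated, e.URLs)
	}
}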
@@ -1,19 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package,register

package api // import "github.com/kubernetes-incubator/descheduler/pkg/api"
@@ -1,48 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package install installs the descheduler's policy API group.
package install

import (
	"k8s.io/apimachinery/pkg/apimachinery/announced"
	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime"

	deschedulerapi "github.com/kubernetes-incubator/descheduler/pkg/api"
	"github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
	deschedulerscheme "github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
)

func init() {
	Install(deschedulerscheme.GroupFactoryRegistry, deschedulerscheme.Registry, deschedulerscheme.Scheme)
}

// Install registers the API group and adds types to a scheme
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
	if err := announced.NewGroupMetaFactory(
		&announced.GroupMetaFactoryArgs{
			GroupName:                  deschedulerapi.GroupName,
			VersionPreferenceOrder:     []string{v1alpha1.SchemeGroupVersion.Version},
			AddInternalObjectsToScheme: deschedulerapi.AddToScheme,
		},
		announced.VersionToSchemeFunc{
			v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme,
		},
	).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
		panic(err)
	}
}
@@ -1,50 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package api

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	AddToScheme   = SchemeBuilder.AddToScheme
)

// GroupName is the group name used in this package
const GroupName = "descheduler"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&DeschedulerPolicy{},
	)
	return nil
}
File diff suppressed because it is too large
@@ -1,60 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package api

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

type DeschedulerPolicy struct {
	metav1.TypeMeta

	// Strategies
	Strategies StrategyList
}

type StrategyName string
type StrategyList map[StrategyName]DeschedulerStrategy

type DeschedulerStrategy struct {
	// Enabled or disabled
	Enabled bool

	// Weight
	Weight int

	// Strategy parameters
	Params StrategyParameters
}

// Only one of its members may be specified
type StrategyParameters struct {
	NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds
	NodeAffinityType                  []string
}

type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage

type NodeResourceUtilizationThresholds struct {
	Thresholds       ResourceThresholds
	TargetThresholds ResourceThresholds
	NumberOfNodes    int
}
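These internal types carry utilization thresholds as percentages keyed by resource name. The tiny helper below is not part of the descheduler code base; it is only an illustrative sketch of how a ResourceThresholds map is typically interpreted, comparing per-resource usage (as a percentage of a node's allocatable) against the configured limit.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"

	api "github.com/kubernetes-incubator/descheduler/pkg/api"
)

// underThresholds reports whether every configured resource's usage percentage
// is strictly below its threshold. Purely illustrative, not descheduler logic.
func underThresholds(usagePercent map[v1.ResourceName]float64, t api.ResourceThresholds) bool {
	for name, limit := range t {
		if usagePercent[name] >= float64(limit) {
			return false
		}
	}
	return true
}

func main() {
	thresholds := api.ResourceThresholds{v1.ResourceCPU: 20, v1.ResourceMemory: 20}
	usage := map[v1.ResourceName]float64{v1.ResourceCPU: 12.5, v1.ResourceMemory: 31.0}
	fmt.Println(underThresholds(usage, thresholds)) // false: memory usage is above 20%
}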
@@ -1,23 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import "k8s.io/apimachinery/pkg/runtime"

func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}
@@ -1,24 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/kubernetes-incubator/descheduler/pkg/api
// +k8s:defaulter-gen=TypeMeta

// Package v1alpha1 is the v1alpha1 version of the descheduler API
// +groupName=descheduler

package v1alpha1 // import "github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
@@ -1,61 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
	localSchemeBuilder = &SchemeBuilder
	AddToScheme        = SchemeBuilder.AddToScheme
)

// GroupName is the group name used in this package
const GroupName = "descheduler"
const GroupVersion = "v1alpha1"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}

func addKnownTypes(scheme *runtime.Scheme) error {
	// TODO this will get cleaned up when the scheme types are fixed
	scheme.AddKnownTypes(SchemeGroupVersion,
		&DeschedulerPolicy{},
	)

	return nil
}
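The install packages shown earlier feed these scheme builders into the descheduler's global scheme through the announced/registered machinery. The following is only a minimal, self-contained sketch of what that registration amounts to when a fresh scheme is built by hand instead of going through the install package; it uses only the AddToScheme functions defined in the register.go files above.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	api "github.com/kubernetes-incubator/descheduler/pkg/api"
	"github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()

	// Register the internal (unversioned) types and the v1alpha1 types,
	// roughly mirroring what the announced/registered install path does.
	if err := api.AddToScheme(scheme); err != nil {
		panic(err)
	}
	if err := v1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Both group-versions now know about DeschedulerPolicy.
	fmt.Println(scheme.AllKnownTypes())
}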
File diff suppressed because it is too large
@@ -1,60 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

type DeschedulerPolicy struct {
	metav1.TypeMeta `json:",inline"`

	// Strategies
	Strategies StrategyList `json:"strategies,omitempty"`
}

type StrategyName string
type StrategyList map[StrategyName]DeschedulerStrategy

type DeschedulerStrategy struct {
	// Enabled or disabled
	Enabled bool `json:"enabled,omitempty"`

	// Weight
	Weight int `json:"weight,omitempty"`

	// Strategy parameters
	Params StrategyParameters `json:"params,omitempty"`
}

// Only one of its members may be specified
type StrategyParameters struct {
	NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
	NodeAffinityType                  []string                          `json:"nodeAffinityType,omitempty"`
}

type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage

type NodeResourceUtilizationThresholds struct {
	Thresholds       ResourceThresholds `json:"thresholds,omitempty"`
	TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
	NumberOfNodes    int                `json:"numberOfNodes,omitempty"`
}
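The json tags above determine the field names used when a v1alpha1 policy is written to or read from a policy file. A short sketch of what those names look like when a policy is serialized with the standard library encoder follows; the strategy name "LowNodeUtilization" and the threshold numbers are illustrative values, not taken from this file.

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"

	"github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
)

func main() {
	policy := v1alpha1.DeschedulerPolicy{
		Strategies: v1alpha1.StrategyList{
			// Illustrative strategy name and thresholds.
			"LowNodeUtilization": v1alpha1.DeschedulerStrategy{
				Enabled: true,
				Params: v1alpha1.StrategyParameters{
					NodeResourceUtilizationThresholds: v1alpha1.NodeResourceUtilizationThresholds{
						Thresholds:       v1alpha1.ResourceThresholds{v1.ResourceCPU: 20},
						TargetThresholds: v1alpha1.ResourceThresholds{v1.ResourceCPU: 50},
					},
				},
			},
		},
	}

	out, err := json.MarshalIndent(policy, "", "  ")
	if err != nil {
		panic(err)
	}
	// Keys follow the json tags: strategies, enabled, params, thresholds, targetThresholds, ...
	fmt.Println(string(out))
}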
@@ -1,146 +0,0 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This file was autogenerated by conversion-gen. Do not edit it manually!
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
unsafe "unsafe"
|
||||
|
||||
api "github.com/kubernetes-incubator/descheduler/pkg/api"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func init() {
|
||||
localSchemeBuilder.Register(RegisterConversions)
|
||||
}
|
||||
|
||||
// RegisterConversions adds conversion functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
func RegisterConversions(scheme *runtime.Scheme) error {
|
||||
return scheme.AddGeneratedConversionFuncs(
|
||||
Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy,
|
||||
Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy,
|
||||
Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy,
|
||||
Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy,
|
||||
Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds,
|
||||
Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds,
|
||||
Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters,
|
||||
Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters,
|
||||
)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
|
||||
out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
|
||||
out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy is an autogenerated conversion function.
|
||||
func Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
|
||||
return autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
|
||||
out.Enabled = in.Enabled
|
||||
out.Weight = in.Weight
|
||||
if err := Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(&in.Params, &out.Params, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
|
||||
out.Enabled = in.Enabled
|
||||
out.Weight = in.Weight
|
||||
if err := Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(&in.Params, &out.Params, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy is an autogenerated conversion function.
|
||||
func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
|
||||
return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
|
||||
out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
|
||||
out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
|
||||
out.NumberOfNodes = in.NumberOfNodes
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
|
||||
out.Thresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
|
||||
out.TargetThresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
|
||||
out.NumberOfNodes = in.NumberOfNodes
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds is an autogenerated conversion function.
|
||||
func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
|
||||
return autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
|
||||
if err := Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
|
||||
if err := Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters is an autogenerated conversion function.
|
||||
func Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
|
||||
return autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in, out, s)
|
||||
}
|
||||
@@ -1,129 +0,0 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Strategies != nil {
|
||||
in, out := &in.Strategies, &out.Strategies
|
||||
*out = make(StrategyList, len(*in))
|
||||
for key, val := range *in {
|
||||
newVal := new(DeschedulerStrategy)
|
||||
val.DeepCopyInto(newVal)
|
||||
(*out)[key] = *newVal
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
|
||||
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
|
||||
*out = *in
|
||||
in.Params.DeepCopyInto(&out.Params)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
|
||||
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerStrategy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
|
||||
*out = *in
|
||||
if in.Thresholds != nil {
|
||||
in, out := &in.Thresholds, &out.Thresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.TargetThresholds != nil {
|
||||
in, out := &in.TargetThresholds, &out.TargetThresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeResourceUtilizationThresholds)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = *in
|
||||
in.NodeResourceUtilizationThresholds.DeepCopyInto(&out.NodeResourceUtilizationThresholds)
|
||||
if in.NodeAffinityType != nil {
|
||||
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
|
||||
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StrategyParameters)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This file was autogenerated by defaulter-gen. Do not edit it manually!
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// RegisterDefaults adds defaulters functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
// All generated defaulters are covering - they call all nested defaulters.
|
||||
func RegisterDefaults(scheme *runtime.Scheme) error {
|
||||
return nil
|
||||
}
|
||||
@@ -1,129 +0,0 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Strategies != nil {
|
||||
in, out := &in.Strategies, &out.Strategies
|
||||
*out = make(StrategyList, len(*in))
|
||||
for key, val := range *in {
|
||||
newVal := new(DeschedulerStrategy)
|
||||
val.DeepCopyInto(newVal)
|
||||
(*out)[key] = *newVal
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
|
||||
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
|
||||
*out = *in
|
||||
in.Params.DeepCopyInto(&out.Params)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
|
||||
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerStrategy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
|
||||
*out = *in
|
||||
if in.Thresholds != nil {
|
||||
in, out := &in.Thresholds, &out.Thresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.TargetThresholds != nil {
|
||||
in, out := &in.TargetThresholds, &out.TargetThresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeResourceUtilizationThresholds)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = *in
|
||||
in.NodeResourceUtilizationThresholds.DeepCopyInto(&out.NodeResourceUtilizationThresholds)
|
||||
if in.NodeAffinityType != nil {
|
||||
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
|
||||
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StrategyParameters)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
@@ -1,19 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package,register

package componentconfig // import "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
@@ -1,48 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package install installs the descheduler's componentconfig API group.
package install

import (
	"k8s.io/apimachinery/pkg/apimachinery/announced"
	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
	"github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
	deschedulerscheme "github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
)

func init() {
	Install(deschedulerscheme.GroupFactoryRegistry, deschedulerscheme.Registry, deschedulerscheme.Scheme)
}

// Install registers the API group and adds types to a scheme
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
	if err := announced.NewGroupMetaFactory(
		&announced.GroupMetaFactoryArgs{
			GroupName:                  componentconfig.GroupName,
			VersionPreferenceOrder:     []string{v1alpha1.SchemeGroupVersion.Version},
			AddInternalObjectsToScheme: componentconfig.AddToScheme,
		},
		announced.VersionToSchemeFunc{
			v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme,
		},
	).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
		panic(err)
	}
}
@@ -1,50 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package componentconfig

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	AddToScheme   = SchemeBuilder.AddToScheme
)

// GroupName is the group name used in this package
const GroupName = "deschedulercomponentconfig"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&DeschedulerConfiguration{},
	)
	return nil
}
@@ -1,629 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by codecgen - DO NOT EDIT.
|
||||
|
||||
package componentconfig
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
codec1978 "github.com/ugorji/go/codec"
|
||||
pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"reflect"
|
||||
"runtime"
|
||||
time "time"
|
||||
)
|
||||
|
||||
const (
|
||||
// ----- content types ----
|
||||
codecSelferCcUTF81234 = 1
|
||||
codecSelferCcRAW1234 = 0
|
||||
// ----- value types used ----
|
||||
codecSelferValueTypeArray1234 = 10
|
||||
codecSelferValueTypeMap1234 = 9
|
||||
codecSelferValueTypeString1234 = 6
|
||||
codecSelferValueTypeInt1234 = 2
|
||||
codecSelferValueTypeUint1234 = 3
|
||||
codecSelferValueTypeFloat1234 = 4
|
||||
)
|
||||
|
||||
var (
|
||||
codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
|
||||
errCodecSelferOnlyMapOrArrayEncodeToStruct1234 = errors.New(`only encoded map or array can be decoded into a struct`)
|
||||
)
|
||||
|
||||
type codecSelfer1234 struct{}
|
||||
|
||||
func init() {
|
||||
if codec1978.GenVersion != 8 {
|
||||
_, file, _, _ := runtime.Caller(0)
|
||||
err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
|
||||
8, codec1978.GenVersion, file)
|
||||
panic(err)
|
||||
}
|
||||
if false { // reference the types, but skip this branch at build/run time
|
||||
var v0 pkg1_v1.TypeMeta
|
||||
var v1 time.Duration
|
||||
_, _ = v0, v1
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperEncoder(e)
|
||||
_, _, _ = h, z, r
|
||||
if x == nil {
|
||||
r.EncodeNil()
|
||||
} else {
|
||||
yym1 := z.EncBinary()
|
||||
_ = yym1
|
||||
if false {
|
||||
} else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil {
|
||||
z.EncExtension(x, yyxt1)
|
||||
} else {
|
||||
yysep2 := !z.EncBinary()
|
||||
yy2arr2 := z.EncBasicHandle().StructToArray
|
||||
var yyq2 [8]bool
|
||||
_ = yyq2
|
||||
_, _ = yysep2, yy2arr2
|
||||
const yyr2 bool = false
|
||||
yyq2[0] = x.Kind != ""
|
||||
yyq2[1] = x.APIVersion != ""
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayStart(8)
|
||||
} else {
|
||||
var yynn2 = 6
|
||||
for _, b := range yyq2 {
|
||||
if b {
|
||||
yynn2++
|
||||
}
|
||||
}
|
||||
r.WriteMapStart(yynn2)
|
||||
yynn2 = 0
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[0] {
|
||||
yym4 := z.EncBinary()
|
||||
_ = yym4
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.Kind))
|
||||
}
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, "")
|
||||
}
|
||||
} else {
|
||||
if yyq2[0] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `kind`)
|
||||
r.WriteMapElemValue()
|
||||
yym5 := z.EncBinary()
|
||||
_ = yym5
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.Kind))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[1] {
|
||||
yym7 := z.EncBinary()
|
||||
_ = yym7
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.APIVersion))
|
||||
}
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, "")
|
||||
}
|
||||
} else {
|
||||
if yyq2[1] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `apiVersion`)
|
||||
r.WriteMapElemValue()
|
||||
yym8 := z.EncBinary()
|
||||
_ = yym8
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.APIVersion))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
yym10 := z.EncBinary()
|
||||
_ = yym10
|
||||
if false {
|
||||
} else if yyxt10 := z.Extension(z.I2Rtid(x.DeschedulingInterval)); yyxt10 != nil {
|
||||
z.EncExtension(x.DeschedulingInterval, yyxt10)
|
||||
} else {
|
||||
r.EncodeInt(int64(x.DeschedulingInterval))
|
||||
}
|
||||
} else {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `DeschedulingInterval`)
|
||||
r.WriteMapElemValue()
|
||||
yym11 := z.EncBinary()
|
||||
_ = yym11
|
||||
if false {
|
||||
} else if yyxt11 := z.Extension(z.I2Rtid(x.DeschedulingInterval)); yyxt11 != nil {
|
||||
z.EncExtension(x.DeschedulingInterval, yyxt11)
|
||||
} else {
|
||||
r.EncodeInt(int64(x.DeschedulingInterval))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
yym13 := z.EncBinary()
|
||||
_ = yym13
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.KubeconfigFile))
|
||||
}
|
||||
} else {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `KubeconfigFile`)
|
||||
r.WriteMapElemValue()
|
||||
yym14 := z.EncBinary()
|
||||
_ = yym14
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.KubeconfigFile))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
yym16 := z.EncBinary()
|
||||
_ = yym16
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.PolicyConfigFile))
|
||||
}
|
||||
} else {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `PolicyConfigFile`)
|
||||
r.WriteMapElemValue()
|
||||
yym17 := z.EncBinary()
|
||||
_ = yym17
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.PolicyConfigFile))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
yym19 := z.EncBinary()
|
||||
_ = yym19
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeBool(bool(x.DryRun))
|
||||
}
|
||||
} else {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `DryRun`)
|
||||
r.WriteMapElemValue()
|
||||
yym20 := z.EncBinary()
|
||||
_ = yym20
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeBool(bool(x.DryRun))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
yym22 := z.EncBinary()
|
||||
_ = yym22
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.NodeSelector))
|
||||
}
|
||||
} else {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `NodeSelector`)
|
||||
r.WriteMapElemValue()
|
||||
yym23 := z.EncBinary()
|
||||
_ = yym23
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.NodeSelector))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
yym25 := z.EncBinary()
|
||||
_ = yym25
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
|
||||
}
|
||||
} else {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `MaxNoOfPodsToEvictPerNode`)
|
||||
r.WriteMapElemValue()
|
||||
yym26 := z.EncBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayEnd()
|
||||
} else {
|
||||
r.WriteMapEnd()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
yym1 := z.DecBinary()
|
||||
_ = yym1
|
||||
if false {
|
||||
} else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil {
|
||||
z.DecExtension(x, yyxt1)
|
||||
} else {
|
||||
yyct2 := r.ContainerType()
|
||||
if yyct2 == codecSelferValueTypeMap1234 {
|
||||
yyl2 := r.ReadMapStart()
|
||||
if yyl2 == 0 {
|
||||
r.ReadMapEnd()
|
||||
} else {
|
||||
x.codecDecodeSelfFromMap(yyl2, d)
|
||||
}
|
||||
} else if yyct2 == codecSelferValueTypeArray1234 {
|
||||
yyl2 := r.ReadArrayStart()
|
||||
if yyl2 == 0 {
|
||||
r.ReadArrayEnd()
|
||||
} else {
|
||||
x.codecDecodeSelfFromArray(yyl2, d)
|
||||
}
|
||||
} else {
|
||||
panic(errCodecSelferOnlyMapOrArrayEncodeToStruct1234)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yyhl3 bool = l >= 0
|
||||
for yyj3 := 0; ; yyj3++ {
|
||||
if yyhl3 {
|
||||
if yyj3 >= l {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if r.CheckBreak() {
|
||||
break
|
||||
}
|
||||
}
|
||||
r.ReadMapElemKey()
|
||||
yys3 := z.StringView(r.DecStructFieldKey(codecSelferValueTypeString1234, z.DecScratchArrayBuffer()))
|
||||
r.ReadMapElemValue()
|
||||
switch yys3 {
|
||||
case "kind":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.Kind = ""
|
||||
} else {
|
||||
yyv4 := &x.Kind
|
||||
yym5 := z.DecBinary()
|
||||
_ = yym5
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv4)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "apiVersion":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.APIVersion = ""
|
||||
} else {
|
||||
yyv6 := &x.APIVersion
|
||||
yym7 := z.DecBinary()
|
||||
_ = yym7
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv6)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "DeschedulingInterval":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DeschedulingInterval = 0
|
||||
} else {
|
||||
yyv8 := &x.DeschedulingInterval
|
||||
yym9 := z.DecBinary()
|
||||
_ = yym9
|
||||
if false {
|
||||
} else if yyxt9 := z.Extension(z.I2Rtid(yyv8)); yyxt9 != nil {
|
||||
z.DecExtension(yyv8, yyxt9)
|
||||
} else {
|
||||
*((*int64)(yyv8)) = int64(r.DecodeInt(64))
|
||||
}
|
||||
}
|
||||
case "KubeconfigFile":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.KubeconfigFile = ""
|
||||
} else {
|
||||
yyv10 := &x.KubeconfigFile
|
||||
yym11 := z.DecBinary()
|
||||
_ = yym11
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv10)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "PolicyConfigFile":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.PolicyConfigFile = ""
|
||||
} else {
|
||||
yyv12 := &x.PolicyConfigFile
|
||||
yym13 := z.DecBinary()
|
||||
_ = yym13
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv12)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "DryRun":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DryRun = false
|
||||
} else {
|
||||
yyv14 := &x.DryRun
|
||||
yym15 := z.DecBinary()
|
||||
_ = yym15
|
||||
if false {
|
||||
} else {
|
||||
*((*bool)(yyv14)) = r.DecodeBool()
|
||||
}
|
||||
}
|
||||
case "NodeSelector":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeSelector = ""
|
||||
} else {
|
||||
yyv16 := &x.NodeSelector
|
||||
yym17 := z.DecBinary()
|
||||
_ = yym17
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv16)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "MaxNoOfPodsToEvictPerNode":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.MaxNoOfPodsToEvictPerNode = 0
|
||||
} else {
|
||||
yyv18 := &x.MaxNoOfPodsToEvictPerNode
|
||||
yym19 := z.DecBinary()
|
||||
_ = yym19
|
||||
if false {
|
||||
} else {
|
||||
*((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234))
|
||||
}
|
||||
}
|
||||
default:
|
||||
z.DecStructFieldNotFound(-1, yys3)
|
||||
} // end switch yys3
|
||||
} // end for yyj3
|
||||
r.ReadMapEnd()
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yyj20 int
|
||||
var yyb20 bool
|
||||
var yyhl20 bool = l >= 0
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.Kind = ""
|
||||
} else {
|
||||
yyv21 := &x.Kind
|
||||
yym22 := z.DecBinary()
|
||||
_ = yym22
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv21)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.APIVersion = ""
|
||||
} else {
|
||||
yyv23 := &x.APIVersion
|
||||
yym24 := z.DecBinary()
|
||||
_ = yym24
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv23)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DeschedulingInterval = 0
|
||||
} else {
|
||||
yyv25 := &x.DeschedulingInterval
|
||||
yym26 := z.DecBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else if yyxt26 := z.Extension(z.I2Rtid(yyv25)); yyxt26 != nil {
|
||||
z.DecExtension(yyv25, yyxt26)
|
||||
} else {
|
||||
*((*int64)(yyv25)) = int64(r.DecodeInt(64))
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.KubeconfigFile = ""
|
||||
} else {
|
||||
yyv27 := &x.KubeconfigFile
|
||||
yym28 := z.DecBinary()
|
||||
_ = yym28
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv27)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.PolicyConfigFile = ""
|
||||
} else {
|
||||
yyv29 := &x.PolicyConfigFile
|
||||
yym30 := z.DecBinary()
|
||||
_ = yym30
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv29)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DryRun = false
|
||||
} else {
|
||||
yyv31 := &x.DryRun
|
||||
yym32 := z.DecBinary()
|
||||
_ = yym32
|
||||
if false {
|
||||
} else {
|
||||
*((*bool)(yyv31)) = r.DecodeBool()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeSelector = ""
|
||||
} else {
|
||||
yyv33 := &x.NodeSelector
|
||||
yym34 := z.DecBinary()
|
||||
_ = yym34
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv33)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.MaxNoOfPodsToEvictPerNode = 0
|
||||
} else {
|
||||
yyv35 := &x.MaxNoOfPodsToEvictPerNode
|
||||
yym36 := z.DecBinary()
|
||||
_ = yym36
|
||||
if false {
|
||||
} else {
|
||||
*((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize1234))
|
||||
}
|
||||
}
|
||||
for {
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
break
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
z.DecStructFieldNotFound(yyj20-1, "")
|
||||
}
|
||||
r.ReadArrayEnd()
|
||||
}
|
||||
@@ -1,48 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package componentconfig

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

type DeschedulerConfiguration struct {
	metav1.TypeMeta

	// DeschedulingInterval is the time interval between consecutive descheduler runs.
	DeschedulingInterval time.Duration

	// KubeconfigFile is the path to a kubeconfig file with authorization and master
	// location information.
	KubeconfigFile string

	// PolicyConfigFile is the filepath to the descheduler policy configuration.
	PolicyConfigFile string

	// DryRun, when true, reports evictions without actually performing them.
	DryRun bool

	// NodeSelector limits descheduling to nodes matching this label selector.
	NodeSelector string

	// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods evicted per node.
	MaxNoOfPodsToEvictPerNode int
}
@@ -1,23 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import "k8s.io/apimachinery/pkg/runtime"

func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}
@@ -1,24 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig
// +k8s:defaulter-gen=TypeMeta

// Package v1alpha1 is the v1alpha1 version of the descheduler's componentconfig API
// +groupName=deschedulercomponentconfig

package v1alpha1 // import "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
@@ -1,60 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
	localSchemeBuilder = &SchemeBuilder
	AddToScheme        = SchemeBuilder.AddToScheme
)

// GroupName is the group name used in this package
const GroupName = "deschedulercomponentconfig"
const GroupVersion = "v1alpha1"

// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}

func addKnownTypes(scheme *runtime.Scheme) error {
	// TODO: this will get cleaned up when the scheme types are fixed
	scheme.AddKnownTypes(SchemeGroupVersion,
		&DeschedulerConfiguration{},
	)
	return nil
}
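For orientation, a minimal sketch of how a caller would wire these registration helpers together; the import path comes from the doc.go package comment above, and the program itself is illustrative rather than part of the removed code:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	v1alpha1 "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs every function registered on SchemeBuilder, i.e.
	// addKnownTypes and addDefaultingFuncs from the file above.
	if err := v1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme now recognizes DeschedulerConfiguration under
	// deschedulercomponentconfig/v1alpha1.
	fmt.Println(scheme.Recognizes(v1alpha1.SchemeGroupVersion.WithKind("DeschedulerConfiguration")))
}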
@@ -1,664 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by codecgen - DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
codec1978 "github.com/ugorji/go/codec"
|
||||
pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"reflect"
|
||||
"runtime"
|
||||
time "time"
|
||||
)
|
||||
|
||||
const (
|
||||
// ----- content types ----
|
||||
codecSelferCcUTF81234 = 1
|
||||
codecSelferCcRAW1234 = 0
|
||||
// ----- value types used ----
|
||||
codecSelferValueTypeArray1234 = 10
|
||||
codecSelferValueTypeMap1234 = 9
|
||||
codecSelferValueTypeString1234 = 6
|
||||
codecSelferValueTypeInt1234 = 2
|
||||
codecSelferValueTypeUint1234 = 3
|
||||
codecSelferValueTypeFloat1234 = 4
|
||||
)
|
||||
|
||||
var (
|
||||
codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
|
||||
errCodecSelferOnlyMapOrArrayEncodeToStruct1234 = errors.New(`only encoded map or array can be decoded into a struct`)
|
||||
)
|
||||
|
||||
type codecSelfer1234 struct{}
|
||||
|
||||
func init() {
|
||||
if codec1978.GenVersion != 8 {
|
||||
_, file, _, _ := runtime.Caller(0)
|
||||
err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
|
||||
8, codec1978.GenVersion, file)
|
||||
panic(err)
|
||||
}
|
||||
if false { // reference the types, but skip this branch at build/run time
|
||||
var v0 pkg1_v1.TypeMeta
|
||||
var v1 time.Duration
|
||||
_, _ = v0, v1
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperEncoder(e)
|
||||
_, _, _ = h, z, r
|
||||
if x == nil {
|
||||
r.EncodeNil()
|
||||
} else {
|
||||
yym1 := z.EncBinary()
|
||||
_ = yym1
|
||||
if false {
|
||||
} else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil {
|
||||
z.EncExtension(x, yyxt1)
|
||||
} else {
|
||||
yysep2 := !z.EncBinary()
|
||||
yy2arr2 := z.EncBasicHandle().StructToArray
|
||||
var yyq2 [8]bool
|
||||
_ = yyq2
|
||||
_, _ = yysep2, yy2arr2
|
||||
const yyr2 bool = false
|
||||
yyq2[0] = x.Kind != ""
|
||||
yyq2[1] = x.APIVersion != ""
|
||||
yyq2[2] = x.DeschedulingInterval != 0
|
||||
yyq2[4] = x.PolicyConfigFile != ""
|
||||
yyq2[5] = x.DryRun != false
|
||||
yyq2[6] = x.NodeSelector != ""
|
||||
yyq2[7] = x.MaxNoOfPodsToEvictPerNode != 0
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayStart(8)
|
||||
} else {
|
||||
var yynn2 = 1
|
||||
for _, b := range yyq2 {
|
||||
if b {
|
||||
yynn2++
|
||||
}
|
||||
}
|
||||
r.WriteMapStart(yynn2)
|
||||
yynn2 = 0
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[0] {
|
||||
yym4 := z.EncBinary()
|
||||
_ = yym4
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.Kind))
|
||||
}
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, "")
|
||||
}
|
||||
} else {
|
||||
if yyq2[0] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `kind`)
|
||||
r.WriteMapElemValue()
|
||||
yym5 := z.EncBinary()
|
||||
_ = yym5
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.Kind))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[1] {
|
||||
yym7 := z.EncBinary()
|
||||
_ = yym7
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.APIVersion))
|
||||
}
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, "")
|
||||
}
|
||||
} else {
|
||||
if yyq2[1] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `apiVersion`)
|
||||
r.WriteMapElemValue()
|
||||
yym8 := z.EncBinary()
|
||||
_ = yym8
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.APIVersion))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[2] {
|
||||
yym10 := z.EncBinary()
|
||||
_ = yym10
|
||||
if false {
|
||||
} else if yyxt10 := z.Extension(z.I2Rtid(x.DeschedulingInterval)); yyxt10 != nil {
|
||||
z.EncExtension(x.DeschedulingInterval, yyxt10)
|
||||
} else {
|
||||
r.EncodeInt(int64(x.DeschedulingInterval))
|
||||
}
|
||||
} else {
|
||||
r.EncodeInt(0)
|
||||
}
|
||||
} else {
|
||||
if yyq2[2] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `deschedulingInterval`)
|
||||
r.WriteMapElemValue()
|
||||
yym11 := z.EncBinary()
|
||||
_ = yym11
|
||||
if false {
|
||||
} else if yyxt11 := z.Extension(z.I2Rtid(x.DeschedulingInterval)); yyxt11 != nil {
|
||||
z.EncExtension(x.DeschedulingInterval, yyxt11)
|
||||
} else {
|
||||
r.EncodeInt(int64(x.DeschedulingInterval))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
yym13 := z.EncBinary()
|
||||
_ = yym13
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.KubeconfigFile))
|
||||
}
|
||||
} else {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `kubeconfigFile`)
|
||||
r.WriteMapElemValue()
|
||||
yym14 := z.EncBinary()
|
||||
_ = yym14
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.KubeconfigFile))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[4] {
|
||||
yym16 := z.EncBinary()
|
||||
_ = yym16
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.PolicyConfigFile))
|
||||
}
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, "")
|
||||
}
|
||||
} else {
|
||||
if yyq2[4] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `policyConfigFile`)
|
||||
r.WriteMapElemValue()
|
||||
yym17 := z.EncBinary()
|
||||
_ = yym17
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.PolicyConfigFile))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[5] {
|
||||
yym19 := z.EncBinary()
|
||||
_ = yym19
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeBool(bool(x.DryRun))
|
||||
}
|
||||
} else {
|
||||
r.EncodeBool(false)
|
||||
}
|
||||
} else {
|
||||
if yyq2[5] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `dryRun`)
|
||||
r.WriteMapElemValue()
|
||||
yym20 := z.EncBinary()
|
||||
_ = yym20
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeBool(bool(x.DryRun))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[6] {
|
||||
yym22 := z.EncBinary()
|
||||
_ = yym22
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.NodeSelector))
|
||||
}
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, "")
|
||||
}
|
||||
} else {
|
||||
if yyq2[6] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `nodeSelector`)
|
||||
r.WriteMapElemValue()
|
||||
yym23 := z.EncBinary()
|
||||
_ = yym23
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.NodeSelector))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[7] {
|
||||
yym25 := z.EncBinary()
|
||||
_ = yym25
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
|
||||
}
|
||||
} else {
|
||||
r.EncodeInt(0)
|
||||
}
|
||||
} else {
|
||||
if yyq2[7] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `maxNoOfPodsToEvictPerNode`)
|
||||
r.WriteMapElemValue()
|
||||
yym26 := z.EncBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayEnd()
|
||||
} else {
|
||||
r.WriteMapEnd()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
yym1 := z.DecBinary()
|
||||
_ = yym1
|
||||
if false {
|
||||
} else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil {
|
||||
z.DecExtension(x, yyxt1)
|
||||
} else {
|
||||
yyct2 := r.ContainerType()
|
||||
if yyct2 == codecSelferValueTypeMap1234 {
|
||||
yyl2 := r.ReadMapStart()
|
||||
if yyl2 == 0 {
|
||||
r.ReadMapEnd()
|
||||
} else {
|
||||
x.codecDecodeSelfFromMap(yyl2, d)
|
||||
}
|
||||
} else if yyct2 == codecSelferValueTypeArray1234 {
|
||||
yyl2 := r.ReadArrayStart()
|
||||
if yyl2 == 0 {
|
||||
r.ReadArrayEnd()
|
||||
} else {
|
||||
x.codecDecodeSelfFromArray(yyl2, d)
|
||||
}
|
||||
} else {
|
||||
panic(errCodecSelferOnlyMapOrArrayEncodeToStruct1234)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yyhl3 bool = l >= 0
|
||||
for yyj3 := 0; ; yyj3++ {
|
||||
if yyhl3 {
|
||||
if yyj3 >= l {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if r.CheckBreak() {
|
||||
break
|
||||
}
|
||||
}
|
||||
r.ReadMapElemKey()
|
||||
yys3 := z.StringView(r.DecStructFieldKey(codecSelferValueTypeString1234, z.DecScratchArrayBuffer()))
|
||||
r.ReadMapElemValue()
|
||||
switch yys3 {
|
||||
case "kind":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.Kind = ""
|
||||
} else {
|
||||
yyv4 := &x.Kind
|
||||
yym5 := z.DecBinary()
|
||||
_ = yym5
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv4)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "apiVersion":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.APIVersion = ""
|
||||
} else {
|
||||
yyv6 := &x.APIVersion
|
||||
yym7 := z.DecBinary()
|
||||
_ = yym7
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv6)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "deschedulingInterval":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DeschedulingInterval = 0
|
||||
} else {
|
||||
yyv8 := &x.DeschedulingInterval
|
||||
yym9 := z.DecBinary()
|
||||
_ = yym9
|
||||
if false {
|
||||
} else if yyxt9 := z.Extension(z.I2Rtid(yyv8)); yyxt9 != nil {
|
||||
z.DecExtension(yyv8, yyxt9)
|
||||
} else {
|
||||
*((*int64)(yyv8)) = int64(r.DecodeInt(64))
|
||||
}
|
||||
}
|
||||
case "kubeconfigFile":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.KubeconfigFile = ""
|
||||
} else {
|
||||
yyv10 := &x.KubeconfigFile
|
||||
yym11 := z.DecBinary()
|
||||
_ = yym11
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv10)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "policyConfigFile":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.PolicyConfigFile = ""
|
||||
} else {
|
||||
yyv12 := &x.PolicyConfigFile
|
||||
yym13 := z.DecBinary()
|
||||
_ = yym13
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv12)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "dryRun":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DryRun = false
|
||||
} else {
|
||||
yyv14 := &x.DryRun
|
||||
yym15 := z.DecBinary()
|
||||
_ = yym15
|
||||
if false {
|
||||
} else {
|
||||
*((*bool)(yyv14)) = r.DecodeBool()
|
||||
}
|
||||
}
|
||||
case "nodeSelector":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeSelector = ""
|
||||
} else {
|
||||
yyv16 := &x.NodeSelector
|
||||
yym17 := z.DecBinary()
|
||||
_ = yym17
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv16)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "maxNoOfPodsToEvictPerNode":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.MaxNoOfPodsToEvictPerNode = 0
|
||||
} else {
|
||||
yyv18 := &x.MaxNoOfPodsToEvictPerNode
|
||||
yym19 := z.DecBinary()
|
||||
_ = yym19
|
||||
if false {
|
||||
} else {
|
||||
*((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234))
|
||||
}
|
||||
}
|
||||
default:
|
||||
z.DecStructFieldNotFound(-1, yys3)
|
||||
} // end switch yys3
|
||||
} // end for yyj3
|
||||
r.ReadMapEnd()
|
||||
}
|
||||
|
||||
func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yyj20 int
|
||||
var yyb20 bool
|
||||
var yyhl20 bool = l >= 0
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.Kind = ""
|
||||
} else {
|
||||
yyv21 := &x.Kind
|
||||
yym22 := z.DecBinary()
|
||||
_ = yym22
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv21)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.APIVersion = ""
|
||||
} else {
|
||||
yyv23 := &x.APIVersion
|
||||
yym24 := z.DecBinary()
|
||||
_ = yym24
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv23)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DeschedulingInterval = 0
|
||||
} else {
|
||||
yyv25 := &x.DeschedulingInterval
|
||||
yym26 := z.DecBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else if yyxt26 := z.Extension(z.I2Rtid(yyv25)); yyxt26 != nil {
|
||||
z.DecExtension(yyv25, yyxt26)
|
||||
} else {
|
||||
*((*int64)(yyv25)) = int64(r.DecodeInt(64))
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.KubeconfigFile = ""
|
||||
} else {
|
||||
yyv27 := &x.KubeconfigFile
|
||||
yym28 := z.DecBinary()
|
||||
_ = yym28
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv27)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.PolicyConfigFile = ""
|
||||
} else {
|
||||
yyv29 := &x.PolicyConfigFile
|
||||
yym30 := z.DecBinary()
|
||||
_ = yym30
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv29)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DryRun = false
|
||||
} else {
|
||||
yyv31 := &x.DryRun
|
||||
yym32 := z.DecBinary()
|
||||
_ = yym32
|
||||
if false {
|
||||
} else {
|
||||
*((*bool)(yyv31)) = r.DecodeBool()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeSelector = ""
|
||||
} else {
|
||||
yyv33 := &x.NodeSelector
|
||||
yym34 := z.DecBinary()
|
||||
_ = yym34
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv33)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.MaxNoOfPodsToEvictPerNode = 0
|
||||
} else {
|
||||
yyv35 := &x.MaxNoOfPodsToEvictPerNode
|
||||
yym36 := z.DecBinary()
|
||||
_ = yym36
|
||||
if false {
|
||||
} else {
|
||||
*((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize1234))
|
||||
}
|
||||
}
|
||||
for {
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
break
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
z.DecStructFieldNotFound(yyj20-1, "")
|
||||
}
|
||||
r.ReadArrayEnd()
|
||||
}
|
||||
@@ -1,48 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

type DeschedulerConfiguration struct {
	metav1.TypeMeta `json:",inline"`

	// DeschedulingInterval is the time interval between consecutive descheduler runs.
	DeschedulingInterval time.Duration `json:"deschedulingInterval,omitempty"`

	// KubeconfigFile is the path to a kubeconfig file with authorization and master
	// location information.
	KubeconfigFile string `json:"kubeconfigFile"`

	// PolicyConfigFile is the filepath to the descheduler policy configuration.
	PolicyConfigFile string `json:"policyConfigFile,omitempty"`

	// DryRun, when true, reports evictions without actually performing them.
	DryRun bool `json:"dryRun,omitempty"`

	// NodeSelector limits descheduling to nodes matching this label selector.
	NodeSelector string `json:"nodeSelector,omitempty"`

	// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods evicted per node.
	MaxNoOfPodsToEvictPerNode int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
}
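As a quick illustration of what the JSON tags above imply, here is a small, self-contained sketch of decoding a versioned configuration with the standard library. The field values are made up for the example, and plain encoding/json interprets the time.Duration field as an integer number of nanoseconds:

package main

import (
	"encoding/json"
	"fmt"

	v1alpha1 "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig/v1alpha1"
)

func main() {
	// Illustrative input only; the keys mirror the struct tags above.
	raw := []byte(`{
		"kind": "DeschedulerConfiguration",
		"apiVersion": "deschedulercomponentconfig/v1alpha1",
		"deschedulingInterval": 300000000000,
		"policyConfigFile": "/policy-dir/policy.yaml",
		"dryRun": true,
		"nodeSelector": "type=compute",
		"maxNoOfPodsToEvictPerNode": 10
	}`)

	var cfg v1alpha1.DeschedulerConfiguration
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// 300000000000 ns prints as 5m0s when formatted as a time.Duration.
	fmt.Println(cfg.DeschedulingInterval, cfg.DryRun, cfg.MaxNoOfPodsToEvictPerNode)
}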
@@ -1,72 +0,0 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This file was autogenerated by conversion-gen. Do not edit it manually!
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
componentconfig "github.com/kubernetes-incubator/descheduler/pkg/apis/componentconfig"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func init() {
|
||||
localSchemeBuilder.Register(RegisterConversions)
|
||||
}
|
||||
|
||||
// RegisterConversions adds conversion functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
func RegisterConversions(scheme *runtime.Scheme) error {
|
||||
return scheme.AddGeneratedConversionFuncs(
|
||||
Convert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration,
|
||||
Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration,
|
||||
)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(in *DeschedulerConfiguration, out *componentconfig.DeschedulerConfiguration, s conversion.Scope) error {
|
||||
out.DeschedulingInterval = time.Duration(in.DeschedulingInterval)
|
||||
out.KubeconfigFile = in.KubeconfigFile
|
||||
out.PolicyConfigFile = in.PolicyConfigFile
|
||||
out.DryRun = in.DryRun
|
||||
out.NodeSelector = in.NodeSelector
|
||||
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(in *DeschedulerConfiguration, out *componentconfig.DeschedulerConfiguration, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
|
||||
out.DeschedulingInterval = time.Duration(in.DeschedulingInterval)
|
||||
out.KubeconfigFile = in.KubeconfigFile
|
||||
out.PolicyConfigFile = in.PolicyConfigFile
|
||||
out.DryRun = in.DryRun
|
||||
out.NodeSelector = in.NodeSelector
|
||||
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration is an autogenerated conversion function.
|
||||
func Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
|
||||
return autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in, out, s)
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerConfiguration.
|
||||
func (in *DeschedulerConfiguration) DeepCopy() *DeschedulerConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -1,32 +0,0 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was autogenerated by defaulter-gen. Do not edit it manually!

package v1alpha1

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
	return nil
}
@@ -1,51 +0,0 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
||||
|
||||
package componentconfig
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerConfiguration.
|
||||
func (in *DeschedulerConfiguration) DeepCopy() *DeschedulerConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -1,67 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// CreateClient builds a Kubernetes clientset from the given kubeconfig path,
// falling back to the in-cluster configuration when the path is empty.
func CreateClient(kubeconfig string) (clientset.Interface, error) {
	var cfg *rest.Config
	if len(kubeconfig) != 0 {
		master, err := GetMasterFromKubeconfig(kubeconfig)
		if err != nil {
			return nil, fmt.Errorf("Failed to parse kubeconfig file: %v ", err)
		}

		cfg, err = clientcmd.BuildConfigFromFlags(master, kubeconfig)
		if err != nil {
			return nil, fmt.Errorf("Unable to build config: %v", err)
		}
	} else {
		var err error
		cfg, err = rest.InClusterConfig()
		if err != nil {
			return nil, fmt.Errorf("Unable to build in cluster config: %v", err)
		}
	}

	return clientset.NewForConfig(cfg)
}

// GetMasterFromKubeconfig returns the API server address of the current
// context's cluster in the given kubeconfig file.
func GetMasterFromKubeconfig(filename string) (string, error) {
	config, err := clientcmd.LoadFromFile(filename)
	if err != nil {
		return "", err
	}

	context, ok := config.Contexts[config.CurrentContext]
	if !ok {
		return "", fmt.Errorf("Failed to get master address from kubeconfig")
	}

	if val, ok := config.Clusters[context.Cluster]; ok {
		return val.Server, nil
	}
	return "", fmt.Errorf("Failed to get master address from kubeconfig")
}
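A minimal sketch of how this helper is typically called; the client-go calls mirror the era of the code above (list calls without a context argument), and the node listing is purely illustrative:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/client"
)

func main() {
	// An empty kubeconfig path makes CreateClient fall back to the
	// in-cluster configuration, as implemented above.
	cs, err := client.CreateClient("")
	if err != nil {
		panic(err)
	}
	nodes, err := cs.Core().Nodes().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("cluster has %d nodes\n", len(nodes.Items))
}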
@@ -1,70 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package descheduler

import (
	"fmt"

	"github.com/golang/glog"

	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/client"
	eutils "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions/utils"
	nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/strategies"
)

// Run builds the client, loads the policy, and executes each descheduling
// strategy once against the ready nodes of the cluster.
func Run(rs *options.DeschedulerServer) error {
	rsclient, err := client.CreateClient(rs.KubeconfigFile)
	if err != nil {
		return err
	}
	rs.Client = rsclient

	deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile)
	if err != nil {
		return err
	}
	if deschedulerPolicy == nil {
		return fmt.Errorf("deschedulerPolicy is nil")
	}

	evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
	if err != nil || len(evictionPolicyGroupVersion) == 0 {
		return err
	}

	stopChannel := make(chan struct{})
	nodes, err := nodeutil.ReadyNodes(rs.Client, rs.NodeSelector, stopChannel)
	if err != nil {
		return err
	}

	if len(nodes) <= 1 {
		glog.V(1).Infof("The cluster has 0 or 1 ready nodes, so evicting pods would cause service disruption or degradation; aborting.")
		return nil
	}

	nodePodCount := strategies.InitializeNodePodCount(nodes)
	strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
	strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
	strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
	strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)

	return nil
}
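To show how Run is driven, a hedged sketch of the wiring; the DeschedulerServer field names are only inferred from the accesses inside Run above (KubeconfigFile, PolicyConfigFile, NodeSelector), so treat them as assumptions rather than the actual options API:

package main

import (
	"github.com/golang/glog"

	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler"
)

func main() {
	// Assumed field names and example values, based only on how Run reads them.
	rs := &options.DeschedulerServer{}
	rs.KubeconfigFile = "/var/run/kubeconfig"
	rs.PolicyConfigFile = "/policy-dir/policy.yaml"
	rs.NodeSelector = "type=compute"

	if err := descheduler.Run(rs); err != nil {
		glog.Fatalf("descheduler run failed: %v", err)
	}
}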
@@ -1,59 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package evictions

import (
	"fmt"

	"k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"

	eutils "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions/utils"
)

func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
	if dryRun {
		return true, nil
	}
	deleteOptions := &metav1.DeleteOptions{}
	// GracePeriodSeconds ?
	eviction := &policy.Eviction{
		TypeMeta: metav1.TypeMeta{
			APIVersion: policyGroupVersion,
			Kind:       eutils.EvictionKind,
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      pod.Name,
			Namespace: pod.Namespace,
		},
		DeleteOptions: deleteOptions,
	}
	err := client.Policy().Evictions(eviction.Namespace).Evict(eviction)

	if err == nil {
		return true, nil
	} else if apierrors.IsTooManyRequests(err) {
		return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
	} else if apierrors.IsNotFound(err) {
		return true, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
	} else {
		return false, err
	}
}
@@ -1,40 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package evictions

import (
	"testing"

	"github.com/kubernetes-incubator/descheduler/test"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func TestEvictPod(t *testing.T) {
	n1 := test.BuildTestNode("node1", 1000, 2000, 9)
	p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
	fakeClient := &fake.Clientset{}
	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, &v1.PodList{Items: []v1.Pod{*p1}}, nil
	})
	evicted, _ := EvictPod(fakeClient, p1, "v1", false)
	if !evicted {
		t.Errorf("Expected %v pod to be evicted", p1.Name)
	}
}
@@ -1,58 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	clientset "k8s.io/client-go/kubernetes"
)

const (
	EvictionKind        = "Eviction"
	EvictionSubresource = "pods/eviction"
)

// SupportEviction uses the Discovery API to find out whether the server
// supports the eviction subresource. If it does, the preferred group/version
// of the policy group is returned; otherwise an empty string is returned.
func SupportEviction(client clientset.Interface) (string, error) {
	discoveryClient := client.Discovery()
	groupList, err := discoveryClient.ServerGroups()
	if err != nil {
		return "", err
	}
	foundPolicyGroup := false
	var policyGroupVersion string
	for _, group := range groupList.Groups {
		if group.Name == "policy" {
			foundPolicyGroup = true
			policyGroupVersion = group.PreferredVersion.GroupVersion
			break
		}
	}
	if !foundPolicyGroup {
		return "", nil
	}
	resourceList, err := discoveryClient.ServerResourcesForGroupVersion("v1")
	if err != nil {
		return "", err
	}
	for _, resource := range resourceList.APIResources {
		if resource.Name == EvictionSubresource && resource.Kind == EvictionKind {
			return policyGroupVersion, nil
		}
	}
	return "", nil
}
@@ -1,167 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/kubernetes-incubator/descheduler/pkg/utils"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ReadyNodes returns ready nodes irrespective of whether they are
|
||||
// schedulable or not.
|
||||
func ReadyNodes(client clientset.Interface, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
|
||||
ns, err := labels.Parse(nodeSelector)
|
||||
if err != nil {
|
||||
return []*v1.Node{}, err
|
||||
}
|
||||
|
||||
var nodes []*v1.Node
|
||||
nl := GetNodeLister(client, stopChannel)
|
||||
if nl != nil {
|
||||
// err is defined above
|
||||
if nodes, err = nl.List(ns); err != nil {
|
||||
return []*v1.Node{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(nodes) == 0 {
|
||||
glog.V(2).Infof("node lister returned empty list, now fetch directly")
|
||||
|
||||
nItems, err := client.Core().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector})
|
||||
if err != nil {
|
||||
return []*v1.Node{}, err
|
||||
}
|
||||
|
||||
if nItems == nil || len(nItems.Items) == 0 {
|
||||
return []*v1.Node{}, nil
|
||||
}
|
||||
|
||||
for i := range nItems.Items {
|
||||
node := nItems.Items[i]
|
||||
nodes = append(nodes, &node)
|
||||
}
|
||||
}
|
||||
|
||||
readyNodes := make([]*v1.Node, 0, len(nodes))
|
||||
for _, node := range nodes {
|
||||
if IsReady(node) {
|
||||
readyNodes = append(readyNodes, node)
|
||||
}
|
||||
}
|
||||
return readyNodes, nil
|
||||
}
|
||||
|
||||
func GetNodeLister(client clientset.Interface, stopChannel <-chan struct{}) corelisters.NodeLister {
|
||||
if stopChannel == nil {
|
||||
return nil
|
||||
}
|
||||
listWatcher := cache.NewListWatchFromClient(client.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
|
||||
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
|
||||
nodeLister := corelisters.NewNodeLister(store)
|
||||
reflector := cache.NewReflector(listWatcher, &v1.Node{}, store, time.Hour)
|
||||
go reflector.Run(stopChannel)
|
||||
|
||||
// To give some time so that listing works, chosen randomly
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
return nodeLister
|
||||
}
|
||||
|
||||
// IsReady checks if the descheduler could run against given node.
|
||||
func IsReady(node *v1.Node) bool {
|
||||
for i := range node.Status.Conditions {
|
||||
cond := &node.Status.Conditions[i]
|
||||
// We consider the node for scheduling only when its:
|
||||
// - NodeReady condition status is ConditionTrue,
|
||||
// - NodeOutOfDisk condition status is ConditionFalse,
|
||||
// - NodeNetworkUnavailable condition status is ConditionFalse.
|
||||
if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
|
||||
glog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
|
||||
return false
|
||||
} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
|
||||
glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
|
||||
return false
|
||||
} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
|
||||
glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
|
||||
return false
|
||||
}*/
|
||||
}
|
||||
// Ignore nodes that are marked unschedulable
|
||||
/*if node.Spec.Unschedulable {
|
||||
glog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)
|
||||
return false
|
||||
}*/
|
||||
return true
|
||||
}
|
||||
|
||||
// IsNodeUschedulable checks if the node is unschedulable. This is helper function to check only in case of
|
||||
// underutilized node so that they won't be accounted for.
|
||||
func IsNodeUschedulable(node *v1.Node) bool {
|
||||
if node.Spec.Unschedulable {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PodFitsAnyNode checks if the given pod fits any of the given nodes, based on
|
||||
// multiple criteria, like, pod node selector matching the node label, node
|
||||
// being schedulable or not.
|
||||
func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
for _, node := range nodes {
|
||||
|
||||
ok, err := utils.PodMatchNodeSelector(pod, node)
|
||||
if err != nil || !ok {
|
||||
continue
|
||||
}
|
||||
if ok {
|
||||
if !IsNodeUschedulable(node) {
|
||||
glog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PodFitsCurrentNode checks if the given pod fits on the given node if the pod
|
||||
// node selector matches the node label.
|
||||
func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
|
||||
ok, err := utils.PodMatchNodeSelector(pod, node)
|
||||
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return false
|
||||
}
|
||||
|
||||
if !ok {
|
||||
glog.V(1).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
|
||||
return false
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Pod %v fits on node %v", pod.Name, node.Name)
|
||||
return true
|
||||
}
|
||||
@@ -1,347 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubernetes-incubator/descheduler/test"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
func TestReadyNodes(t *testing.T) {
|
||||
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
||||
node1.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
|
||||
node2 := test.BuildTestNode("node2", 1000, 2000, 9)
|
||||
node3 := test.BuildTestNode("node3", 1000, 2000, 9)
|
||||
node3.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}}
|
||||
node4 := test.BuildTestNode("node4", 1000, 2000, 9)
|
||||
node4.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}}
|
||||
node5 := test.BuildTestNode("node5", 1000, 2000, 9)
|
||||
node5.Spec.Unschedulable = true
|
||||
node6 := test.BuildTestNode("node6", 1000, 2000, 9)
|
||||
node6.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}
|
||||
|
||||
if !IsReady(node1) {
|
||||
t.Errorf("Expected %v to be ready", node1.Name)
|
||||
}
|
||||
if !IsReady(node2) {
|
||||
t.Errorf("Expected %v to be ready", node2.Name)
|
||||
}
|
||||
if !IsReady(node3) {
|
||||
t.Errorf("Expected %v to be ready", node3.Name)
|
||||
}
|
||||
if !IsReady(node4) {
|
||||
t.Errorf("Expected %v to be ready", node4.Name)
|
||||
}
|
||||
if !IsReady(node5) {
|
||||
t.Errorf("Expected %v to be ready", node5.Name)
|
||||
}
|
||||
if IsReady(node6) {
|
||||
t.Errorf("Expected %v to be not ready", node6.Name)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestReadyNodesWithNodeSelector(t *testing.T) {
|
||||
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
||||
node1.Labels = map[string]string{"type": "compute"}
|
||||
node2 := test.BuildTestNode("node2", 1000, 2000, 9)
|
||||
node2.Labels = map[string]string{"type": "infra"}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(node1, node2)
|
||||
nodeSelector := "type=compute"
|
||||
nodes, _ := ReadyNodes(fakeClient, nodeSelector, nil)
|
||||
|
||||
if nodes[0].Name != "node1" {
|
||||
t.Errorf("Expected node1, got %s", nodes[0].Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsNodeUschedulable(t *testing.T) {
|
||||
tests := []struct {
|
||||
description string
|
||||
node *v1.Node
|
||||
IsUnSchedulable bool
|
||||
}{
|
||||
{
|
||||
description: "Node is expected to be schedulable",
|
||||
node: &v1.Node{
|
||||
Spec: v1.NodeSpec{Unschedulable: false},
|
||||
},
|
||||
IsUnSchedulable: false,
|
||||
},
|
||||
{
|
||||
description: "Node is not expected to be schedulable because of unschedulable field",
|
||||
node: &v1.Node{
|
||||
Spec: v1.NodeSpec{Unschedulable: true},
|
||||
},
|
||||
IsUnSchedulable: true,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
actualUnSchedulable := IsNodeUschedulable(test.node)
|
||||
if actualUnSchedulable != test.IsUnSchedulable {
|
||||
t.Errorf("Test %#v failed", test.description)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestPodFitsCurrentNode(t *testing.T) {
|
||||
|
||||
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||
nodeLabelValue := "yes"
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
node *v1.Node
|
||||
success bool
|
||||
}{
|
||||
{
|
||||
description: "Pod with nodeAffinity set, expected to fit the node",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
node: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Pod with nodeAffinity set, not expected to fit the node",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
node: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
actual := PodFitsCurrentNode(tc.pod, tc.node)
|
||||
if actual != tc.success {
|
||||
t.Errorf("Test %#v failed", tc.description)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodFitsAnyNode(t *testing.T) {
|
||||
|
||||
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||
nodeLabelValue := "yes"
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
success bool
|
||||
}{
|
||||
{
|
||||
description: "Pod expected to fit one of the nodes",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Pod expected to fit none of the nodes",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "unfit1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "unfit2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "Nodes are unschedulable but labels match, should fail",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
actual := PodFitsAnyNode(tc.pod, tc.nodes)
|
||||
if actual != tc.success {
|
||||
t.Errorf("Test %#v failed", tc.description)
|
||||
}
|
||||
}
|
||||
}
|
||||
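// Taken together, the cases above pin down PodFitsAnyNode: it reports true
// only when at least one candidate node both satisfies the pod's required
// node affinity and is schedulable. A minimal sketch consistent with these
// tests (an assumption, not necessarily the exact implementation; the
// affinity check is shown via a hypothetical helper):
//
//	func podFitsAnyNodeSketch(pod *v1.Pod, nodes []*v1.Node) bool {
//		for _, node := range nodes {
//			if node.Spec.Unschedulable {
//				continue
//			}
//			if requiredAffinityMatches(pod, node) { // hypothetical helper
//				return true
//			}
//		}
//		return false
//	}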
@@ -1,143 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	clientset "k8s.io/client-go/kubernetes"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
	"k8s.io/kubernetes/pkg/kubelet/types"
)

// checkLatencySensitiveResourcesForAContainer checks if there are any latency sensitive resources like GPUs.
func checkLatencySensitiveResourcesForAContainer(rl v1.ResourceList) bool {
	if rl == nil {
		return false
	}
	for rName := range rl {
		if rName == v1.ResourceNvidiaGPU {
			return true
		}
		// TODO: Add support for other high value resources like hugepages etc. once kube is rebased to 1.8.
	}
	return false
}

// IsLatencySensitivePod checks if a pod consumes high-value devices like GPUs or hugepages, or has CPU pinning enabled.
func IsLatencySensitivePod(pod *v1.Pod) bool {
	for _, container := range pod.Spec.Containers {
		resourceList := container.Resources.Requests
		if checkLatencySensitiveResourcesForAContainer(resourceList) {
			return true
		}
	}
	return false
}

// IsEvictable checks if a pod is evictable or not.
func IsEvictable(pod *v1.Pod) bool {
	ownerRefList := OwnerRef(pod)
	if IsMirrorPod(pod) || IsPodWithLocalStorage(pod) || len(ownerRefList) == 0 || IsDaemonsetPod(ownerRefList) || IsCriticalPod(pod) {
		return false
	}
	return true
}

// ListEvictablePodsOnNode returns the list of evictable pods on node.
func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
	pods, err := ListPodsOnANode(client, node)
	if err != nil {
		return []*v1.Pod{}, err
	}
	evictablePods := make([]*v1.Pod, 0)
	for _, pod := range pods {
		if !IsEvictable(pod) {
			continue
		}
		evictablePods = append(evictablePods, pod)
	}
	return evictablePods, nil
}

// ListPodsOnANode lists all pods on the given node that are not in a terminal phase.
func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
	fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed))
	if err != nil {
		return []*v1.Pod{}, err
	}

	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(
		metav1.ListOptions{FieldSelector: fieldSelector.String()})
	if err != nil {
		return []*v1.Pod{}, err
	}

	pods := make([]*v1.Pod, 0)
	for i := range podList.Items {
		pods = append(pods, &podList.Items[i])
	}
	return pods, nil
}

// IsCriticalPod checks whether the pod is a cluster-critical pod.
func IsCriticalPod(pod *v1.Pod) bool {
	return types.IsCriticalPod(pod)
}

// IsBestEffortPod checks whether the pod has the BestEffort QoS class.
func IsBestEffortPod(pod *v1.Pod) bool {
	return qos.GetPodQOS(pod) == v1.PodQOSBestEffort
}

// IsBurstablePod checks whether the pod has the Burstable QoS class.
func IsBurstablePod(pod *v1.Pod) bool {
	return qos.GetPodQOS(pod) == v1.PodQOSBurstable
}

// IsGuaranteedPod checks whether the pod has the Guaranteed QoS class.
func IsGuaranteedPod(pod *v1.Pod) bool {
	return qos.GetPodQOS(pod) == v1.PodQOSGuaranteed
}

// IsDaemonsetPod checks whether any owner reference points at a DaemonSet.
func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
	for _, ownerRef := range ownerRefList {
		if ownerRef.Kind == "DaemonSet" {
			return true
		}
	}
	return false
}

// IsMirrorPod checks whether the pod is a mirror pod.
func IsMirrorPod(pod *v1.Pod) bool {
	_, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey]
	return found
}

// IsPodWithLocalStorage checks whether the pod uses hostPath or emptyDir volumes.
func IsPodWithLocalStorage(pod *v1.Pod) bool {
	for _, volume := range pod.Spec.Volumes {
		if volume.HostPath != nil || volume.EmptyDir != nil {
			return true
		}
	}
	return false
}

// OwnerRef returns the ownerRefList for the pod.
func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
	return pod.ObjectMeta.GetOwnerReferences()
}
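// For reference, the field selector built by ListPodsOnANode above is roughly
// equivalent to this query (illustrative):
//
//	kubectl get pods --all-namespaces \
//	  --field-selector spec.nodeName=<node>,status.phase!=Succeeded,status.phase!=Failed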
@@ -1,84 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pod
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubernetes-incubator/descheduler/test"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
)
|
||||
|
||||
func TestPodTypes(t *testing.T) {
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||
|
||||
// These won't be evicted.
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
|
||||
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
|
||||
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
|
||||
p6.Spec.Containers[0].Resources.Requests[v1.ResourceNvidiaGPU] = *resource.NewMilliQuantity(3, resource.DecimalSI)
|
||||
|
||||
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
// The following 4 pods won't get evicted.
|
||||
// A daemonset.
|
||||
//p2.Annotations = test.GetDaemonSetAnnotation()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
// A pod with local storage.
|
||||
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p3.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
p4.Annotations = test.GetMirrorPodAnnotation()
|
||||
// A Critical Pod.
|
||||
p5.Namespace = "kube-system"
|
||||
p5.Annotations = test.GetCriticalPodAnnotation()
|
||||
if !IsMirrorPod(p4) {
|
||||
t.Errorf("Expected p4 to be a mirror pod.")
|
||||
}
|
||||
if !IsCriticalPod(p5) {
|
||||
t.Errorf("Expected p5 to be a critical pod.")
|
||||
}
|
||||
if !IsPodWithLocalStorage(p3) {
|
||||
t.Errorf("Expected p3 to be a pod with local storage.")
|
||||
}
|
||||
ownerRefList := OwnerRef(p2)
|
||||
if !IsDaemonsetPod(ownerRefList) {
|
||||
t.Errorf("Expected p2 to be a daemonset pod.")
|
||||
}
|
||||
ownerRefList = OwnerRef(p1)
|
||||
if IsDaemonsetPod(ownerRefList) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
|
||||
t.Errorf("Expected p1 to be a normal pod.")
|
||||
}
|
||||
if !IsLatencySensitivePod(p6) {
|
||||
t.Errorf("Expected p6 to be latency sensitive pod")
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,56 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package descheduler

import (
	"fmt"
	"io/ioutil"

	"k8s.io/apimachinery/pkg/runtime"

	"github.com/golang/glog"
	"github.com/kubernetes-incubator/descheduler/pkg/api"
	_ "github.com/kubernetes-incubator/descheduler/pkg/api/install"
	"github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
)

func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
	if policyConfigFile == "" {
		glog.V(1).Infof("policy config file not specified")
		return nil, nil
	}

	policy, err := ioutil.ReadFile(policyConfigFile)
	if err != nil {
		return nil, fmt.Errorf("failed to read policy config file %q: %+v", policyConfigFile, err)
	}

	versionedPolicy := &v1alpha1.DeschedulerPolicy{}

	decoder := scheme.Codecs.UniversalDecoder(v1alpha1.SchemeGroupVersion)
	if err := runtime.DecodeInto(decoder, policy, versionedPolicy); err != nil {
		return nil, fmt.Errorf("failed decoding descheduler's policy config %q: %v", policyConfigFile, err)
	}

	internalPolicy := &api.DeschedulerPolicy{}
	if err := scheme.Scheme.Convert(versionedPolicy, internalPolicy, nil); err != nil {
		return nil, fmt.Errorf("failed converting versioned policy to internal policy version: %v", err)
	}

	return internalPolicy, nil
}
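// An illustrative policy file of the kind LoadPolicyConfig decodes. The
// strategy keys and field names are assumptions modeled on the strategies and
// threshold params elsewhere in this diff; treat the values as an example only:
//
//	apiVersion: "descheduler/v1alpha1"
//	kind: "DeschedulerPolicy"
//	strategies:
//	  "RemoveDuplicates":
//	    enabled: true
//	  "LowNodeUtilization":
//	    enabled: true
//	    params:
//	      nodeResourceUtilizationThresholds:
//	        thresholds:
//	          "cpu": 20
//	          "pods": 20
//	        targetThresholds:
//	          "cpu": 50
//	          "pods": 50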
@@ -1,33 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheme

import (
	"os"

	"k8s.io/apimachinery/pkg/apimachinery/announced"
	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

var (
	GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry)
	Registry             = registered.NewOrDie(os.Getenv("DESCHEDULER_API_VERSIONS"))
	Scheme               = runtime.NewScheme()
	Codecs               = serializer.NewCodecFactory(Scheme)
)
@@ -1,98 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
	"strings"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
	"github.com/kubernetes-incubator/descheduler/pkg/api"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
	podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
)

//type creator string
type DuplicatePodsMap map[string][]*v1.Pod

// RemoveDuplicatePods removes the duplicate pods on a node. This strategy evicts all duplicate pods on the node.
// A pod is a duplicate of another if both have the same creator, kind, and namespace.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods, or pods with local storage.
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
	if !strategy.Enabled {
		return
	}
	deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode)
}

// deleteDuplicatePods evicts duplicate pods from the given nodes and returns the count of evicted pods.
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
	podsEvicted := 0
	for _, node := range nodes {
		glog.V(1).Infof("Processing node: %#v", node.Name)
		dpm := ListDuplicatePodsOnANode(client, node)
		for creator, pods := range dpm {
			if len(pods) > 1 {
				glog.V(1).Infof("%#v", creator)
				// i = 0 is skipped so the first pod of each duplicate group is kept.
				for i := 1; i < len(pods); i++ {
					if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
						break
					}
					success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
					if !success {
						glog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
					} else {
						nodepodCount[node]++
						glog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
					}
				}
			}
		}
		podsEvicted += nodepodCount[node]
	}
	return podsEvicted
}

// ListDuplicatePodsOnANode lists duplicate pods on a given node.
func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) DuplicatePodsMap {
	pods, err := podutil.ListEvictablePodsOnNode(client, node)
	if err != nil {
		return nil
	}
	return FindDuplicatePods(pods)
}

// FindDuplicatePods takes a list of pods and returns a DuplicatePodsMap keyed by owner.
func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
	dpm := DuplicatePodsMap{}
	for _, pod := range pods {
		// Errors are ignored here because ListDuplicatePodsOnANode calls ListEvictablePodsOnNode,
		// which already checks for errors.
		ownerRefList := podutil.OwnerRef(pod)
		for _, ownerRef := range ownerRefList {
			// ownerRef doesn't need a namespace since the assumption is that the owner is in the same namespace.
			s := strings.Join([]string{ownerRef.Kind, ownerRef.Name}, "/")
			dpm[s] = append(dpm[s], pod)
		}
	}
	return dpm
}
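// Example of the map FindDuplicatePods builds (illustrative): pods sharing an
// owner collapse under one "Kind/Name" key, and deleteDuplicatePods keeps the
// first entry of each group while treating the rest as eviction candidates.
//
//	DuplicatePodsMap{
//		"ReplicaSet/frontend": {pod1, pod2, pod3}, // pod2 and pod3 become eviction candidates
//		"ReplicaSet/backend":  {pod4},             // a single entry, nothing to evict
//	}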
@@ -1,85 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategies
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubernetes-incubator/descheduler/test"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
//TODO:@ravisantoshgudimetla This could be made table driven.
|
||||
func TestFindDuplicatePods(t *testing.T) {
|
||||
node := test.BuildTestNode("n1", 2000, 3000, 10)
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node.Name)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node.Name)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node.Name)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node.Name)
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node.Name)
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node.Name)
|
||||
|
||||
// All the following pods except for one will be evicted.
|
||||
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p8.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
|
||||
// The following 4 pods won't get evicted.
|
||||
// A daemonset.
|
||||
p4.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
// A pod with local storage.
|
||||
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p5.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
p6.Annotations = test.GetMirrorPodAnnotation()
|
||||
// A Critical Pod.
|
||||
p7.Namespace = "kube-system"
|
||||
p7.Annotations = test.GetCriticalPodAnnotation()
|
||||
expectedEvictedPodCount := 2
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9}}, nil
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, node, nil
|
||||
})
|
||||
npe := nodePodEvictedCount{}
|
||||
npe[node] = 0
|
||||
podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 2)
|
||||
if podsEvicted != expectedEvictedPodCount {
|
||||
t.Errorf("Unexpected no of pods evicted")
|
||||
}
|
||||
|
||||
}
|
||||
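// Why expectedEvictedPodCount is 2 in the test above: p1, p2, p3, p8 and p9
// carry the ReplicaSet owner reference (presumably the same owner from
// test.GetReplicaSetOwnerRefList), so four of the five are duplicates, but
// maxPodsToEvict is 2, which caps the evictions on the node at two.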
@@ -1,402 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategies
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
helper "k8s.io/kubernetes/pkg/api/v1/resource"
|
||||
|
||||
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
|
||||
"github.com/kubernetes-incubator/descheduler/pkg/api"
|
||||
"github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
|
||||
podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
|
||||
)
|
||||
|
||||
type NodeUsageMap struct {
|
||||
node *v1.Node
|
||||
usage api.ResourceThresholds
|
||||
allPods []*v1.Pod
|
||||
nonRemovablePods []*v1.Pod
|
||||
bePods []*v1.Pod
|
||||
bPods []*v1.Pod
|
||||
gPods []*v1.Pod
|
||||
}
|
||||
|
||||
type NodePodsMap map[*v1.Node][]*v1.Pod
|
||||
|
||||
func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
|
||||
if !strategy.Enabled {
|
||||
return
|
||||
}
|
||||
// todo: move to config validation?
|
||||
// TODO: Maybe create a struct for the strategy as well, so that we don't have to pass along all the params?
|
||||
|
||||
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
|
||||
if !validateThresholds(thresholds) {
|
||||
return
|
||||
}
|
||||
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
|
||||
if !validateTargetThresholds(targetThresholds) {
|
||||
return
|
||||
}
|
||||
|
||||
npm := createNodePodsMap(ds.Client, nodes)
|
||||
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
|
||||
|
||||
glog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
|
||||
thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
|
||||
|
||||
if len(lowNodes) == 0 {
|
||||
glog.V(1).Infof("No node is underutilized, nothing to do here, you might tune your thersholds further")
|
||||
return
|
||||
}
|
||||
glog.V(1).Infof("Total number of underutilized nodes: %v", len(lowNodes))
|
||||
|
||||
if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
|
||||
glog.V(1).Infof("number of nodes underutilized (%v) is less than NumberOfNodes (%v), nothing to do here", len(lowNodes), strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
|
||||
return
|
||||
}
|
||||
|
||||
if len(lowNodes) == len(nodes) {
|
||||
glog.V(1).Infof("all nodes are underutilized, nothing to do here")
|
||||
return
|
||||
}
|
||||
|
||||
if len(targetNodes) == 0 {
|
||||
glog.V(1).Infof("all nodes are under target utilization, nothing to do here")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(1).Infof("Criteria for a node above target utilization: CPU: %v, Mem: %v, Pods: %v",
|
||||
targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
|
||||
glog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
|
||||
|
||||
totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun, ds.MaxNoOfPodsToEvictPerNode, nodepodCount)
|
||||
glog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)
|
||||
|
||||
}
|
||||
|
||||
func validateThresholds(thresholds api.ResourceThresholds) bool {
|
||||
if thresholds == nil || len(thresholds) == 0 {
|
||||
glog.V(1).Infof("no resource threshold is configured")
|
||||
return false
|
||||
}
|
||||
for name := range thresholds {
|
||||
switch name {
|
||||
case v1.ResourceCPU:
|
||||
continue
|
||||
case v1.ResourceMemory:
|
||||
continue
|
||||
case v1.ResourcePods:
|
||||
continue
|
||||
default:
|
||||
glog.Errorf("only cpu, memory, or pods thresholds can be specified")
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
//This function could be merged into above once we are clear.
|
||||
func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
|
||||
if targetThresholds == nil {
|
||||
glog.V(1).Infof("no target resource threshold is configured")
|
||||
return false
|
||||
} else if _, ok := targetThresholds[v1.ResourcePods]; !ok {
|
||||
glog.V(1).Infof("no target resource threshold for pods is configured")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
|
||||
// low and high thresholds, it is simply ignored.
|
||||
func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap) {
|
||||
lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
|
||||
for node, pods := range npm {
|
||||
usage, allPods, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods)
|
||||
nuMap := NodeUsageMap{node, usage, allPods, nonRemovablePods, bePods, bPods, gPods}
|
||||
|
||||
// Check if node is underutilized and if we can schedule pods on it.
|
||||
if !nodeutil.IsNodeUschedulable(node) && IsNodeWithLowUtilization(usage, thresholds) {
|
||||
glog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
|
||||
lowNodes = append(lowNodes, nuMap)
|
||||
} else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
|
||||
glog.V(2).Infof("Node %#v is over utilized with usage: %#v", node.Name, usage)
|
||||
targetNodes = append(targetNodes, nuMap)
|
||||
} else {
|
||||
glog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)
|
||||
}
|
||||
glog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bePods:%v, bPods:%v, gPods:%v", len(allPods), len(nonRemovablePods), len(bePods), len(bPods), len(gPods))
|
||||
}
|
||||
return lowNodes, targetNodes
|
||||
}
|
||||
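// Worked example of the classification above (illustrative): with thresholds
// {cpu: 30, pods: 30} and targetThresholds {cpu: 50, pods: 50}, a schedulable
// node at {cpu: 20%, pods: 22%} lands in lowNodes, a node at {cpu: 75%,
// pods: 60%} lands in targetNodes, and a node at {cpu: 40%, pods: 40%} is
// ignored because it sits between the two bands.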
|
||||
// evictPodsFromTargetNodes evicts pods based on priority if all the pods on the node have a priority set;
// otherwise it falls back to evicting them based on QoS class.
|
||||
// TODO: @ravig Break this function into smaller functions.
|
||||
func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount nodePodEvictedCount) int {
|
||||
podsEvicted := 0
|
||||
|
||||
SortNodesByUsage(targetNodes)
|
||||
|
||||
// upper bound on total number of pods/cpu/memory to be moved
|
||||
var totalPods, totalCpu, totalMem float64
|
||||
for _, node := range lowNodes {
|
||||
nodeCapacity := node.node.Status.Capacity
|
||||
if len(node.node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = node.node.Status.Allocatable
|
||||
}
|
||||
// totalPods to be moved
|
||||
podsPercentage := targetThresholds[v1.ResourcePods] - node.usage[v1.ResourcePods]
|
||||
totalPods += ((float64(podsPercentage) * float64(nodeCapacity.Pods().Value())) / 100)
|
||||
|
||||
// totalCPU capacity to be moved
|
||||
if _, ok := targetThresholds[v1.ResourceCPU]; ok {
|
||||
cpuPercentage := targetThresholds[v1.ResourceCPU] - node.usage[v1.ResourceCPU]
|
||||
totalCpu += ((float64(cpuPercentage) * float64(nodeCapacity.Cpu().MilliValue())) / 100)
|
||||
}
|
||||
|
||||
// totalMem capacity to be moved
|
||||
if _, ok := targetThresholds[v1.ResourceMemory]; ok {
|
||||
memPercentage := targetThresholds[v1.ResourceMemory] - node.usage[v1.ResourceMemory]
|
||||
totalMem += ((float64(memPercentage) * float64(nodeCapacity.Memory().Value())) / 100)
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(1).Infof("Total capacity to be moved: CPU:%v, Mem:%v, Pods:%v", totalCpu, totalMem, totalPods)
|
||||
glog.V(1).Infof("********Number of pods evicted from each node:***********")
|
||||
|
||||
for _, node := range targetNodes {
|
||||
nodeCapacity := node.node.Status.Capacity
|
||||
if len(node.node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = node.node.Status.Allocatable
|
||||
}
|
||||
glog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
|
||||
currentPodsEvicted := nodepodCount[node.node]
|
||||
|
||||
// Check if one pod has priority, if yes, assume that all pods have priority and evict pods based on priority.
|
||||
if node.allPods[0].Spec.Priority != nil {
|
||||
glog.V(1).Infof("All pods have priority associated with them. Evicting pods based on priority")
|
||||
evictablePods := make([]*v1.Pod, 0)
|
||||
evictablePods = append(append(node.bPods, node.bePods...), node.gPods...)
|
||||
|
||||
// Sort the evictable pods by priority; pods with the same priority are further ordered by QoS tier.
|
||||
sortPodsBasedOnPriority(evictablePods)
|
||||
evictPods(evictablePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||
} else {
|
||||
// TODO: Remove this when we support only priority.
|
||||
// Falling back to evicting pods based on QoS.
|
||||
glog.V(1).Infof("Evicting pods based on QoS")
|
||||
glog.V(1).Infof("There are %v non-evictable pods on the node", len(node.nonRemovablePods))
|
||||
// evict best effort pods
|
||||
evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||
// evict burstable pods
|
||||
evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||
// evict guaranteed pods
|
||||
evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||
}
|
||||
nodepodCount[node.node] = currentPodsEvicted
|
||||
podsEvicted = podsEvicted + nodepodCount[node.node]
|
||||
glog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
|
||||
}
|
||||
return podsEvicted
|
||||
}
|
||||
|
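// Worked example for the "capacity to be moved" computation above
// (illustrative numbers): with targetThresholds pods=50 and an underutilized
// node whose pods usage is 30% of a 10-pod allocatable capacity,
// podsPercentage = 50 - 30 = 20, so totalPods += 20 * 10 / 100 = 2, i.e.
// roughly two pods' worth of headroom is counted on that low node.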
||||
func evictPods(inputPods []*v1.Pod,
|
||||
client clientset.Interface,
|
||||
evictionPolicyGroupVersion string,
|
||||
targetThresholds api.ResourceThresholds,
|
||||
nodeCapacity v1.ResourceList,
|
||||
nodeUsage api.ResourceThresholds,
|
||||
totalPods *float64,
|
||||
totalCpu *float64,
|
||||
totalMem *float64,
|
||||
podsEvicted *int,
|
||||
dryRun bool, maxPodsToEvict int) {
|
||||
if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCpu > 0 || *totalMem > 0) {
|
||||
onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
|
||||
for _, pod := range inputPods {
|
||||
if maxPodsToEvict > 0 && *podsEvicted+1 > maxPodsToEvict {
|
||||
break
|
||||
}
|
||||
cUsage := helper.GetResourceRequest(pod, v1.ResourceCPU)
|
||||
mUsage := helper.GetResourceRequest(pod, v1.ResourceMemory)
|
||||
success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
|
||||
if !success {
|
||||
glog.Warningf("Error when evicting pod: %#v (%#v)", pod.Name, err)
|
||||
} else {
|
||||
glog.V(3).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
|
||||
// update remaining pods
|
||||
*podsEvicted++
|
||||
nodeUsage[v1.ResourcePods] -= onePodPercentage
|
||||
*totalPods--
|
||||
|
||||
// update remaining cpu
|
||||
*totalCpu -= float64(cUsage)
|
||||
nodeUsage[v1.ResourceCPU] -= api.Percentage((float64(cUsage) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
|
||||
|
||||
// update remaining memory
|
||||
*totalMem -= float64(mUsage)
|
||||
nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)
|
||||
|
||||
glog.V(3).Infof("updated node usage: %#v", nodeUsage)
|
||||
// check if node utilization drops below target threshold or required capacity (cpu, memory, pods) is moved
|
||||
if !IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) || (*totalPods <= 0 && *totalCpu <= 0 && *totalMem <= 0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func SortNodesByUsage(nodes []NodeUsageMap) {
|
||||
sort.Slice(nodes, func(i, j int) bool {
|
||||
var ti, tj api.Percentage
|
||||
for name, value := range nodes[i].usage {
|
||||
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
|
||||
ti += value
|
||||
}
|
||||
}
|
||||
for name, value := range nodes[j].usage {
|
||||
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
|
||||
tj += value
|
||||
}
|
||||
}
|
||||
// To return sorted in descending order
|
||||
return ti > tj
|
||||
})
|
||||
}
|
||||
|
||||
// sortPodsBasedOnPriority sorts pods based on priority and if their priorities are equal, they are sorted based on QoS tiers.
|
||||
func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
|
||||
sort.Slice(evictablePods, func(i, j int) bool {
|
||||
if evictablePods[i].Spec.Priority == nil && evictablePods[j].Spec.Priority != nil {
|
||||
return true
|
||||
}
|
||||
if evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority != nil {
|
||||
return false
|
||||
}
|
||||
if (evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority == nil) || (*evictablePods[i].Spec.Priority == *evictablePods[j].Spec.Priority) {
|
||||
if podutil.IsBestEffortPod(evictablePods[i]) {
|
||||
return true
|
||||
}
|
||||
if podutil.IsBurstablePod(evictablePods[i]) && podutil.IsGuaranteedPod(evictablePods[j]) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
return *evictablePods[i].Spec.Priority < *evictablePods[j].Spec.Priority
|
||||
})
|
||||
}
|
||||
|
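// Illustrative ordering from sortPodsBasedOnPriority: given p1{priority: nil},
// p2{priority: 5, Guaranteed}, p3{priority: 5, BestEffort} and
// p4{priority: 100}, the sort yields [p1, p3, p2, p4]; nil-priority pods come
// first, ties are broken so BestEffort sorts before Burstable and Guaranteed,
// and eviction then walks the slice from the front.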
||||
// createNodePodsMap returns nodepodsmap with evictable pods on node.
|
||||
func createNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap {
|
||||
npm := NodePodsMap{}
|
||||
for _, node := range nodes {
|
||||
pods, err := podutil.ListPodsOnANode(client, node)
|
||||
if err != nil {
|
||||
glog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
|
||||
} else {
|
||||
npm[node] = pods
|
||||
}
|
||||
}
|
||||
return npm
|
||||
}
|
||||
|
||||
func IsNodeAboveTargetUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
|
||||
for name, nodeValue := range nodeThresholds {
|
||||
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
|
||||
if value, ok := thresholds[name]; !ok {
|
||||
continue
|
||||
} else if nodeValue > value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func IsNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
|
||||
for name, nodeValue := range nodeThresholds {
|
||||
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
|
||||
if value, ok := thresholds[name]; !ok {
|
||||
continue
|
||||
} else if nodeValue > value {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
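// Worked example (illustrative): for usage {cpu: 60, memory: 40, pods: 55}
// and targetThresholds {cpu: 50, pods: 50}, IsNodeAboveTargetUtilization is
// true because cpu 60 > 50; for thresholds {cpu: 30, pods: 30} the same node
// is not low-utilized, since IsNodeWithLowUtilization requires every tracked
// resource to stay at or below its threshold.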
|
||||
// NodeUtilization returns the current resource usage of a node.
|
||||
func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
|
||||
bePods := []*v1.Pod{}
|
||||
nonRemovablePods := []*v1.Pod{}
|
||||
bPods := []*v1.Pod{}
|
||||
gPods := []*v1.Pod{}
|
||||
totalReqs := map[v1.ResourceName]resource.Quantity{}
|
||||
for _, pod := range pods {
|
||||
// We need to compute the usage of nonRemovablePods unless it is a best effort pod. So, cannot use podutil.ListEvictablePodsOnNode
|
||||
if !podutil.IsEvictable(pod) {
|
||||
nonRemovablePods = append(nonRemovablePods, pod)
|
||||
if podutil.IsBestEffortPod(pod) {
|
||||
continue
|
||||
}
|
||||
} else if podutil.IsBestEffortPod(pod) {
|
||||
bePods = append(bePods, pod)
|
||||
continue
|
||||
} else if podutil.IsBurstablePod(pod) {
|
||||
bPods = append(bPods, pod)
|
||||
} else {
|
||||
gPods = append(gPods, pod)
|
||||
}
|
||||
|
||||
req, _ := helper.PodRequestsAndLimits(pod)
|
||||
for name, quantity := range req {
|
||||
if name == v1.ResourceCPU || name == v1.ResourceMemory {
|
||||
if value, ok := totalReqs[name]; !ok {
|
||||
totalReqs[name] = *quantity.Copy()
|
||||
} else {
|
||||
value.Add(quantity)
|
||||
totalReqs[name] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nodeCapacity := node.Status.Capacity
|
||||
if len(node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = node.Status.Allocatable
|
||||
}
|
||||
|
||||
usage := api.ResourceThresholds{}
|
||||
totalCPUReq := totalReqs[v1.ResourceCPU]
|
||||
totalMemReq := totalReqs[v1.ResourceMemory]
|
||||
totalPods := len(pods)
|
||||
usage[v1.ResourceCPU] = api.Percentage((float64(totalCPUReq.MilliValue()) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
|
||||
usage[v1.ResourceMemory] = api.Percentage(float64(totalMemReq.Value()) / float64(nodeCapacity.Memory().Value()) * 100)
|
||||
usage[v1.ResourcePods] = api.Percentage((float64(totalPods) * 100) / float64(nodeCapacity.Pods().Value()))
|
||||
return usage, pods, nonRemovablePods, bePods, bPods, gPods
|
||||
}
|
||||
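// Worked example for the usage percentages above (illustrative): a node with
// 4000m allocatable CPU and room for 10 pods, running 5 pods that request
// 2000m CPU in total, reports usage[cpu] = 2000 * 100 / 4000 = 50% and
// usage[pods] = 5 * 100 / 10 = 50%.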
@@ -1,328 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategies
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/kubernetes-incubator/descheduler/pkg/api"
|
||||
"github.com/kubernetes-incubator/descheduler/test"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// TODO: Make this table driven.
|
||||
func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
|
||||
var thresholds = make(api.ResourceThresholds)
|
||||
var targetThresholds = make(api.ResourceThresholds)
|
||||
thresholds[v1.ResourceCPU] = 30
|
||||
thresholds[v1.ResourcePods] = 30
|
||||
targetThresholds[v1.ResourceCPU] = 50
|
||||
targetThresholds[v1.ResourcePods] = 50
|
||||
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
|
||||
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
|
||||
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
|
||||
// Making node n3 unschedulable so that it won't be counted in the low-utilized nodes list.
|
||||
n3.Spec.Unschedulable = true
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
|
||||
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
|
||||
|
||||
// These won't be evicted.
|
||||
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
|
||||
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
|
||||
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
|
||||
|
||||
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
// The following 4 pods won't get evicted.
|
||||
// A daemonset.
|
||||
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
// A pod with local storage.
|
||||
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p7.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
p7.Annotations = test.GetMirrorPodAnnotation()
|
||||
// A Critical Pod.
|
||||
p8.Namespace = "kube-system"
|
||||
p8.Annotations = test.GetCriticalPodAnnotation()
|
||||
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
|
||||
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
list := action.(core.ListAction)
|
||||
fieldString := list.GetListRestrictions().Fields.String()
|
||||
if strings.Contains(fieldString, "n1") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
|
||||
}
|
||||
if strings.Contains(fieldString, "n2") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
|
||||
}
|
||||
if strings.Contains(fieldString, "n3") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{}}, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction := action.(core.GetAction)
|
||||
switch getAction.GetName() {
|
||||
case n1.Name:
|
||||
return true, n1, nil
|
||||
case n2.Name:
|
||||
return true, n2, nil
|
||||
case n3.Name:
|
||||
return true, n3, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
||||
})
|
||||
expectedPodsEvicted := 3
|
||||
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
|
||||
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
|
||||
if len(lowNodes) != 1 {
|
||||
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
|
||||
}
|
||||
npe := nodePodEvictedCount{}
|
||||
npe[n1] = 0
|
||||
npe[n2] = 0
|
||||
npe[n3] = 0
|
||||
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
|
||||
if expectedPodsEvicted != podsEvicted {
|
||||
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TODO: Make this table driven.
|
||||
func TestLowNodeUtilizationWithPriorities(t *testing.T) {
|
||||
var thresholds = make(api.ResourceThresholds)
|
||||
var targetThresholds = make(api.ResourceThresholds)
|
||||
thresholds[v1.ResourceCPU] = 30
|
||||
thresholds[v1.ResourcePods] = 30
|
||||
targetThresholds[v1.ResourceCPU] = 50
|
||||
targetThresholds[v1.ResourcePods] = 50
|
||||
lowPriority := int32(0)
|
||||
highPriority := int32(10000)
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
|
||||
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
|
||||
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
|
||||
// Making node n3 unschedulable so that it won't be counted in the low-utilized nodes list.
|
||||
n3.Spec.Unschedulable = true
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||
p1.Spec.Priority = &highPriority
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||
p2.Spec.Priority = &highPriority
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||
p3.Spec.Priority = &highPriority
|
||||
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
|
||||
p4.Spec.Priority = &highPriority
|
||||
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
|
||||
p5.Spec.Priority = &lowPriority
|
||||
|
||||
// These won't be evicted.
|
||||
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
|
||||
p6.Spec.Priority = &highPriority
|
||||
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
|
||||
p7.Spec.Priority = &lowPriority
|
||||
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
|
||||
p8.Spec.Priority = &lowPriority
|
||||
|
||||
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
// The following 4 pods won't get evicted.
|
||||
// A daemonset.
|
||||
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
// A pod with local storage.
|
||||
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p7.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
p7.Annotations = test.GetMirrorPodAnnotation()
|
||||
// A Critical Pod.
|
||||
p8.Namespace = "kube-system"
|
||||
p8.Annotations = test.GetCriticalPodAnnotation()
|
||||
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
|
||||
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
list := action.(core.ListAction)
|
||||
fieldString := list.GetListRestrictions().Fields.String()
|
||||
if strings.Contains(fieldString, "n1") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
|
||||
}
|
||||
if strings.Contains(fieldString, "n2") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
|
||||
}
|
||||
if strings.Contains(fieldString, "n3") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{}}, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction := action.(core.GetAction)
|
||||
switch getAction.GetName() {
|
||||
case n1.Name:
|
||||
return true, n1, nil
|
||||
case n2.Name:
|
||||
return true, n2, nil
|
||||
case n3.Name:
|
||||
return true, n3, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
||||
})
|
||||
expectedPodsEvicted := 3
|
||||
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
|
||||
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
|
||||
if len(lowNodes) != 1 {
|
||||
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
|
||||
}
|
||||
npe := nodePodEvictedCount{}
|
||||
npe[n1] = 0
|
||||
npe[n2] = 0
|
||||
npe[n3] = 0
|
||||
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
|
||||
if expectedPodsEvicted != podsEvicted {
|
||||
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSortPodsByPriority(t *testing.T) {
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
|
||||
lowPriority := int32(0)
|
||||
highPriority := int32(10000)
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||
p1.Spec.Priority = &lowPriority
|
||||
|
||||
// BestEffort
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||
p2.Spec.Priority = &highPriority
|
||||
|
||||
p2.Spec.Containers[0].Resources.Requests = nil
|
||||
p2.Spec.Containers[0].Resources.Limits = nil
|
||||
|
||||
// Burstable
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||
p3.Spec.Priority = &highPriority
|
||||
|
||||
// Guaranteed
|
||||
p4 := test.BuildTestPod("p4", 400, 100, n1.Name)
|
||||
p4.Spec.Priority = &highPriority
|
||||
p4.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
|
||||
p4.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
|
||||
|
||||
// Pods with nil priorities.
|
||||
p5 := test.BuildTestPod("p5", 400, 100, n1.Name)
|
||||
p5.Spec.Priority = nil
|
||||
p6 := test.BuildTestPod("p6", 400, 100, n1.Name)
|
||||
p6.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
|
||||
p6.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
|
||||
p6.Spec.Priority = nil
|
||||
|
||||
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
|
||||
|
||||
sortPodsBasedOnPriority(podList)
|
||||
for _, pod := range podList {
|
||||
fmt.Println(pod)
|
||||
}
|
||||
if !reflect.DeepEqual(podList[len(podList)-1], p4) {
|
||||
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateThresholds(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input api.ResourceThresholds
|
||||
succeed bool
|
||||
}{
|
||||
{
|
||||
name: "passing nil map for threshold",
|
||||
input: nil,
|
||||
succeed: false,
|
||||
},
|
||||
{
|
||||
name: "passing no threshold",
|
||||
input: api.ResourceThresholds{},
|
||||
succeed: false,
|
||||
},
|
||||
{
|
||||
name: "passing unsupported resource name",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
v1.ResourceStorage: 25.5,
|
||||
},
|
||||
succeed: false,
|
||||
},
|
||||
{
|
||||
name: "passing invalid resource name",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
"coolResource": 42.0,
|
||||
},
|
||||
succeed: false,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with cpu, memory and pods",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 30,
|
||||
v1.ResourcePods: 40,
|
||||
},
|
||||
succeed: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
isValid := validateThresholds(test.input)
|
||||
|
||||
if isValid != test.succeed {
|
||||
t.Errorf("expected validity of threshold: %#v\nto be %v but got %v instead", test.input, test.succeed, isValid)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,74 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
  "github.com/golang/glog"

  "github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
  "github.com/kubernetes-incubator/descheduler/pkg/api"
  "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
  nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
  podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
  "k8s.io/api/core/v1"
)

// RemovePodsViolatingNodeAffinity evicts pods that no longer satisfy the required node
// affinity they were scheduled with, provided some other node in the list could host them.
func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
  removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
}

func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
  evictedPodCount := 0
  if !strategy.Enabled {
    return evictedPodCount
  }

  for _, nodeAffinity := range strategy.Params.NodeAffinityType {
    glog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)

    switch nodeAffinity {
    case "requiredDuringSchedulingIgnoredDuringExecution":
      for _, node := range nodes {
        glog.V(1).Infof("Processing node: %#v\n", node.Name)

        pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node)
        if err != nil {
          glog.Errorf("failed to get pods from %v: %v", node.Name, err)
        }

        for _, pod := range pods {
          if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
            break
          }
          if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
            if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
              glog.V(1).Infof("Evicting pod: %v", pod.Name)
              evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, ds.DryRun)
              nodepodCount[node]++
            }
          }
        }
        evictedPodCount += nodepodCount[node]
      }
    default:
      glog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
      return evictedPodCount
    }
  }
  glog.V(1).Infof("Evicted %v pods", evictedPodCount)
  return evictedPodCount
}

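A minimal sketch of driving this strategy from another package, assuming the vendored import paths used above; the `example` package name and the `runNodeAffinityStrategy` helper are hypothetical, while the strategy and parameter names come straight from the `api` types this code consumes.

```go
package example

import (
  "github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
  "github.com/kubernetes-incubator/descheduler/pkg/api"
  "github.com/kubernetes-incubator/descheduler/pkg/descheduler/strategies"
  "k8s.io/api/core/v1"
)

// runNodeAffinityStrategy enables only the requiredDuringSchedulingIgnoredDuringExecution
// affinity type and lets the strategy walk the supplied nodes, evicting pods that no
// longer fit where they run but would fit on some other node in the list.
func runNodeAffinityStrategy(ds *options.DeschedulerServer, evictionPolicyGroupVersion string, nodes []*v1.Node) {
  strategy := api.DeschedulerStrategy{
    Enabled: true,
    Params: api.StrategyParameters{
      NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
    },
  }
  // The per-node eviction counter is what ds.MaxNoOfPodsToEvictPerNode is checked against.
  nodePodCount := strategies.InitializeNodePodCount(nodes)
  strategies.RemovePodsViolatingNodeAffinity(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount)
}
```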
@@ -1,184 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
  "testing"

  "github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
  "github.com/kubernetes-incubator/descheduler/pkg/api"
  "github.com/kubernetes-incubator/descheduler/test"
  "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/runtime"
  "k8s.io/client-go/kubernetes/fake"
  core "k8s.io/client-go/testing"
)

func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
  requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
    Enabled: true,
    Params: api.StrategyParameters{
      NodeAffinityType: []string{
        "requiredDuringSchedulingIgnoredDuringExecution",
      },
    },
  }

  nodeLabelKey := "kubernetes.io/desiredNode"
  nodeLabelValue := "yes"
  nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10)
  nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue

  nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10)

  unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10)
  unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
  unschedulableNodeWithLabels.Spec.Unschedulable = true

  addPodsToNode := func(node *v1.Node) []v1.Pod {
    podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name)
    podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
      NodeAffinity: &v1.NodeAffinity{
        RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
          NodeSelectorTerms: []v1.NodeSelectorTerm{
            {
              MatchExpressions: []v1.NodeSelectorRequirement{
                {
                  Key:      nodeLabelKey,
                  Operator: "In",
                  Values: []string{
                    nodeLabelValue,
                  },
                },
              },
            },
          },
        },
      },
    }

    pod1 := test.BuildTestPod("pod1", 100, 0, node.Name)
    pod2 := test.BuildTestPod("pod2", 100, 0, node.Name)

    podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
    pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
    pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

    return []v1.Pod{
      *podWithNodeAffinity,
      *pod1,
      *pod2,
    }
  }

  tests := []struct {
    description             string
    nodes                   []*v1.Node
    pods                    []v1.Pod
    strategy                api.DeschedulerStrategy
    expectedEvictedPodCount int
    npe                     nodePodEvictedCount
    maxPodsToEvict          int
  }{
    {
      description: "Strategy disabled, should not evict any pods",
      strategy: api.DeschedulerStrategy{
        Enabled: false,
        Params: api.StrategyParameters{
          NodeAffinityType: []string{
            "requiredDuringSchedulingIgnoredDuringExecution",
          },
        },
      },
      expectedEvictedPodCount: 0,
      pods:                    addPodsToNode(nodeWithoutLabels),
      nodes:                   []*v1.Node{nodeWithoutLabels, nodeWithLabels},
      npe:                     nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
      maxPodsToEvict:          0,
    },
    {
      description: "Invalid strategy type, should not evict any pods",
      strategy: api.DeschedulerStrategy{
        Enabled: true,
        Params: api.StrategyParameters{
          NodeAffinityType: []string{
            "requiredDuringSchedulingRequiredDuringExecution",
          },
        },
      },
      expectedEvictedPodCount: 0,
      pods:                    addPodsToNode(nodeWithoutLabels),
      nodes:                   []*v1.Node{nodeWithoutLabels, nodeWithLabels},
      npe:                     nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
      maxPodsToEvict:          0,
    },
    {
      description:             "Pod is correctly scheduled on node, no eviction expected",
      strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
      expectedEvictedPodCount: 0,
      pods:                    addPodsToNode(nodeWithLabels),
      nodes:                   []*v1.Node{nodeWithLabels},
      npe:                     nodePodEvictedCount{nodeWithLabels: 0},
      maxPodsToEvict:          0,
    },
    {
      description:             "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
      expectedEvictedPodCount: 1,
      strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
      pods:                    addPodsToNode(nodeWithoutLabels),
      nodes:                   []*v1.Node{nodeWithoutLabels, nodeWithLabels},
      npe:                     nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
      maxPodsToEvict:          0,
    },
    {
      description:             "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 1, should evict only one pod",
      expectedEvictedPodCount: 1,
      strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
      pods:                    addPodsToNode(nodeWithoutLabels),
      nodes:                   []*v1.Node{nodeWithoutLabels, nodeWithLabels},
      npe:                     nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
      maxPodsToEvict:          1,
    },
    {
      description:             "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
      expectedEvictedPodCount: 0,
      strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
      pods:                    addPodsToNode(nodeWithoutLabels),
      nodes:                   []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
      npe:                     nodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
      maxPodsToEvict:          0,
    },
  }

  for _, tc := range tests {
    fakeClient := &fake.Clientset{}
    fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
      return true, &v1.PodList{Items: tc.pods}, nil
    })

    ds := options.DeschedulerServer{
      Client: fakeClient,
    }

    actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict)
    if actualEvictedPodCount != tc.expectedEvictedPodCount {
      t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
    }
  }
}

@@ -1,105 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
  "github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
  "github.com/kubernetes-incubator/descheduler/pkg/api"
  "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
  podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"

  "github.com/golang/glog"

  "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  clientset "k8s.io/client-go/kubernetes"
  priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
)

// RemovePodsViolatingInterPodAntiAffinity evicts pods that violate inter-pod anti-affinity rules.
func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
  if !strategy.Enabled {
    return
  }
  removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
}

// removePodsWithAffinityRules evicts pods on the node that violate inter-pod anti-affinity rules.
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int) int {
  podsEvicted := 0
  for _, node := range nodes {
    glog.V(1).Infof("Processing node: %#v\n", node.Name)
    pods, err := podutil.ListEvictablePodsOnNode(client, node)
    if err != nil {
      return 0
    }
    totalPods := len(pods)
    for i := 0; i < totalPods; i++ {
      if maxPodsToEvict > 0 && nodePodCount[node]+1 > maxPodsToEvict {
        break
      }
      if checkPodsWithAntiAffinityExist(pods[i], pods) {
        success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
        if !success {
          glog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
        } else {
          nodePodCount[node]++
          glog.V(1).Infof("Evicted pod: %#v because of existing anti-affinity (%#v)\n", pods[i].Name, err)
          // Since the current pod is evicted, other pods whose anti-affinity was only with
          // this pod need not be evicted. Update the pod list.
          pods = append(pods[:i], pods[i+1:]...)
          i--
          totalPods--
        }
      }
    }
    podsEvicted += nodePodCount[node]
  }
  return podsEvicted
}

// checkPodsWithAntiAffinityExist checks if there are other pods on the node that the current pod cannot tolerate.
func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
  affinity := pod.Spec.Affinity
  if affinity != nil && affinity.PodAntiAffinity != nil {
    for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) {
      namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term)
      selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
      if err != nil {
        glog.Infof("%v", err)
        return false
      }
      for _, existingPod := range pods {
        if existingPod.Name != pod.Name && priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
          return true
        }
      }
    }
  }
  return false
}

// getPodAntiAffinityTerms gets the anti-affinity terms for the given pod.
func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
  if podAntiAffinity != nil {
    if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
      terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
    }
  }
  return terms
}

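Note that checkPodsWithAntiAffinityExist only compares the namespaces and label selectors of the required anti-affinity terms against the other pods listed on the same node; the term's TopologyKey is not consulted at this point. A minimal in-package test sketch of that behaviour, assuming the repository's `test` helpers shown later in this compare; the pod names and the foo=bar label are made up for illustration.

```go
package strategies

import (
  "testing"

  "github.com/kubernetes-incubator/descheduler/test"
  "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestCheckPodsWithAntiAffinityExist (hypothetical): "blocked" declares required
// anti-affinity against pods labelled foo=bar, and "victim" carries that label,
// so the check reports a conflict for "blocked" but not for "victim".
func TestCheckPodsWithAntiAffinityExist(t *testing.T) {
  node := test.BuildTestNode("n1", 2000, 3000, 10)

  victim := test.BuildTestPod("victim", 100, 0, node.Name)
  victim.Labels = map[string]string{"foo": "bar"}

  blocked := test.BuildTestPod("blocked", 100, 0, node.Name)
  blocked.Spec.Affinity = &v1.Affinity{
    PodAntiAffinity: &v1.PodAntiAffinity{
      RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
        LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
        TopologyKey:   "kubernetes.io/hostname",
      }},
    },
  }

  pods := []*v1.Pod{blocked, victim}
  if !checkPodsWithAntiAffinityExist(blocked, pods) {
    t.Errorf("expected a conflict for pod %q", blocked.Name)
  }
  if checkPodsWithAntiAffinityExist(victim, pods) {
    t.Errorf("expected no conflict for pod %q", victim.Name)
  }
}
```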
@@ -1,89 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
  "testing"

  "github.com/kubernetes-incubator/descheduler/test"
  "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/runtime"
  "k8s.io/client-go/kubernetes/fake"
  core "k8s.io/client-go/testing"
)

func TestPodAntiAffinity(t *testing.T) {
  node := test.BuildTestNode("n1", 2000, 3000, 10)
  p1 := test.BuildTestPod("p1", 100, 0, node.Name)
  p2 := test.BuildTestPod("p2", 100, 0, node.Name)
  p3 := test.BuildTestPod("p3", 100, 0, node.Name)
  p4 := test.BuildTestPod("p4", 100, 0, node.Name)
  p2.Labels = map[string]string{"foo": "bar"}
  p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
  p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
  p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
  p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

  // set pod anti affinity
  setPodAntiAffinity(p1)
  setPodAntiAffinity(p3)
  setPodAntiAffinity(p4)

  // create fake client
  fakeClient := &fake.Clientset{}
  fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
    return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil
  })
  fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
    return true, node, nil
  })
  npe := nodePodEvictedCount{}
  npe[node] = 0
  expectedEvictedPodCount := 3
  podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0)
  if podsEvicted != expectedEvictedPodCount {
    t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
  }
  npe[node] = 0
  expectedEvictedPodCount = 1
  podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1)
  if podsEvicted != expectedEvictedPodCount {
    t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
  }
}

func setPodAntiAffinity(inputPod *v1.Pod) {
  inputPod.Spec.Affinity = &v1.Affinity{
    PodAntiAffinity: &v1.PodAntiAffinity{
      RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
        {
          LabelSelector: &metav1.LabelSelector{
            MatchExpressions: []metav1.LabelSelectorRequirement{
              {
                Key:      "foo",
                Operator: metav1.LabelSelectorOpIn,
                Values:   []string{"bar"},
              },
            },
          },
          TopologyKey: "region",
        },
      },
    },
  }
}

@@ -1,37 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
  "k8s.io/api/core/v1"
)

// This file contains the data structures, types and functions needed by all the strategies,
// so that we don't have to compute them again in each strategy.

// nodePodEvictedCount keeps count of the pods evicted on each node. It is used in conjunction
// with the strategies to enforce the maximum number of pod evictions allowed per node.
type nodePodEvictedCount map[*v1.Node]int

// InitializeNodePodCount initializes the nodePodCount.
func InitializeNodePodCount(nodeList []*v1.Node) nodePodEvictedCount {
  var nodePodCount = make(nodePodEvictedCount)
  for _, node := range nodeList {
    // Initialize podsEvicted till now with 0.
    nodePodCount[node] = 0
  }
  return nodePodCount
}

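Because the same counter map can be threaded through several strategies, evictions made by an earlier strategy count against MaxNoOfPodsToEvictPerNode for the later ones as well. A minimal sketch under that assumption; the `example` package name and `runStrategies` driver are hypothetical, while the exported entry points are the ones shown in this compare.

```go
package example

import (
  "github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
  "github.com/kubernetes-incubator/descheduler/pkg/api"
  "github.com/kubernetes-incubator/descheduler/pkg/descheduler/strategies"
  "k8s.io/api/core/v1"
)

// runStrategies creates one eviction counter up front and passes it to every
// strategy, so an eviction performed by the node-affinity strategy also counts
// toward ds.MaxNoOfPodsToEvictPerNode when the anti-affinity strategy runs.
func runStrategies(ds *options.DeschedulerServer, nodeAffinityStrategy, antiAffinityStrategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
  nodePodCount := strategies.InitializeNodePodCount(nodes)
  strategies.RemovePodsViolatingNodeAffinity(ds, nodeAffinityStrategy, evictionPolicyGroupVersion, nodes, nodePodCount)
  strategies.RemovePodsViolatingInterPodAntiAffinity(ds, antiAffinityStrategy, evictionPolicyGroupVersion, nodes, nodePodCount)
}
```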
@@ -1,94 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
  "fmt"

  "github.com/golang/glog"
  "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/labels"
  v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)

// The following code has been copied from the predicates package to avoid huge vendoring
// issues; it comes mostly from k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/.
// Some minor changes have been made to ease the imports, but most of the code remains untouched.

// PodMatchNodeSelector checks if a pod node selector matches the node label.
func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) (bool, error) {
  if node == nil {
    return false, fmt.Errorf("node not found")
  }
  if podMatchesNodeLabels(pod, node) {
    return true, nil
  }
  return false, nil
}

// The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector.
func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
  // Check if node.Labels match pod.Spec.NodeSelector.
  if len(pod.Spec.NodeSelector) > 0 {
    selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
    if !selector.Matches(labels.Set(node.Labels)) {
      return false
    }
  }

  // 1. nil NodeSelector matches all nodes (i.e. does not filter out any nodes)
  // 2. nil []NodeSelectorTerm (equivalent to non-nil empty NodeSelector) matches no nodes
  // 3. zero-length non-nil []NodeSelectorTerm matches no nodes also, just for simplicity
  // 4. nil []NodeSelectorRequirement (equivalent to non-nil empty NodeSelectorTerm) matches no nodes
  // 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity
  // 6. non-nil empty NodeSelectorRequirement is not allowed

  affinity := pod.Spec.Affinity
  if affinity != nil && affinity.NodeAffinity != nil {
    nodeAffinity := affinity.NodeAffinity
    // if no required NodeAffinity requirements, will do no-op, means select all nodes.
    if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
      return true
    }

    // Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
    if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
      nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
      glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
      return nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
    }
  }
  return true
}

// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms;
// terms are ORed, and an empty list of terms will match nothing.
func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
  for _, req := range nodeSelectorTerms {
    nodeSelector, err := v1helper.NodeSelectorRequirementsAsSelector(req.MatchExpressions)
    if err != nil {
      glog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
      return false
    }
    if nodeSelector.Matches(labels.Set(node.Labels)) {
      return true
    }
  }
  return false
}

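A small in-package test sketch of the two matching paths above: the plain nodeSelector must match the node's labels, and the required node-affinity terms are ORed. The `disktype=ssd` label, pod name, and node names are made up for illustration.

```go
package utils

import (
  "testing"

  "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestPodMatchNodeSelector (hypothetical): the pod requires disktype=ssd via a
// required node-affinity term, so only the labelled node matches.
func TestPodMatchNodeSelector(t *testing.T) {
  pod := &v1.Pod{
    ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "default"},
    Spec: v1.PodSpec{
      Affinity: &v1.Affinity{
        NodeAffinity: &v1.NodeAffinity{
          RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
            NodeSelectorTerms: []v1.NodeSelectorTerm{{
              MatchExpressions: []v1.NodeSelectorRequirement{{
                Key:      "disktype",
                Operator: v1.NodeSelectorOpIn,
                Values:   []string{"ssd"},
              }},
            }},
          },
        },
      },
    },
  }

  ssdNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "ssd-node", Labels: map[string]string{"disktype": "ssd"}}}
  hddNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "hdd-node", Labels: map[string]string{"disktype": "hdd"}}}

  if ok, err := PodMatchNodeSelector(pod, ssdNode); err != nil || !ok {
    t.Errorf("expected pod to match the ssd node, got ok=%v err=%v", ok, err)
  }
  if ok, err := PodMatchNodeSelector(pod, hddNode); err != nil || ok {
    t.Errorf("expected pod not to match the hdd node, got ok=%v err=%v", ok, err)
  }
}
```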
@@ -1,17 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

@@ -1,156 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
  "testing"
  "time"

  "github.com/golang/glog"

  "github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
  deschedulerapi "github.com/kubernetes-incubator/descheduler/pkg/api"
  "github.com/kubernetes-incubator/descheduler/pkg/descheduler/client"
  eutils "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions/utils"
  nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
  podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
  "github.com/kubernetes-incubator/descheduler/pkg/descheduler/strategies"
  "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/api/resource"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/pkg/api/testapi"
)

func MakePodSpec() v1.PodSpec {
  return v1.PodSpec{
    Containers: []v1.Container{{
      Name:  "pause",
      Image: "kubernetes/pause",
      Ports: []v1.ContainerPort{{ContainerPort: 80}},
      Resources: v1.ResourceRequirements{
        Limits: v1.ResourceList{
          v1.ResourceCPU:    resource.MustParse("100m"),
          v1.ResourceMemory: resource.MustParse("500Mi"),
        },
        Requests: v1.ResourceList{
          v1.ResourceCPU:    resource.MustParse("100m"),
          v1.ResourceMemory: resource.MustParse("500Mi"),
        },
      },
    }},
  }
}

// RcByNameContainer returns a ReplicationController with the specified name and container.
func RcByNameContainer(name string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {

  zeroGracePeriod := int64(0)

  // Add "name": name to the labels, overwriting if it exists.
  labels["name"] = name
  if gracePeriod == nil {
    gracePeriod = &zeroGracePeriod
  }
  return &v1.ReplicationController{
    TypeMeta: metav1.TypeMeta{
      Kind:       "ReplicationController",
      APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
    },
    ObjectMeta: metav1.ObjectMeta{
      Name: name,
    },
    Spec: v1.ReplicationControllerSpec{
      Replicas: func(i int32) *int32 { return &i }(replicas),
      Selector: map[string]string{
        "name": name,
      },
      Template: &v1.PodTemplateSpec{
        ObjectMeta: metav1.ObjectMeta{
          Labels: labels,
        },
        Spec: MakePodSpec(),
      },
    },
  }
}

// startEndToEndForLowNodeUtilization tests the low node utilization strategy.
func startEndToEndForLowNodeUtilization(clientset clientset.Interface) {
  var thresholds = make(deschedulerapi.ResourceThresholds)
  var targetThresholds = make(deschedulerapi.ResourceThresholds)
  thresholds[v1.ResourceMemory] = 20
  thresholds[v1.ResourcePods] = 20
  thresholds[v1.ResourceCPU] = 85
  targetThresholds[v1.ResourceMemory] = 20
  targetThresholds[v1.ResourcePods] = 20
  targetThresholds[v1.ResourceCPU] = 90
  // Run descheduler.
  evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
  if err != nil || len(evictionPolicyGroupVersion) == 0 {
    glog.Fatalf("%v", err)
  }
  stopChannel := make(chan struct{})
  nodes, err := nodeutil.ReadyNodes(clientset, "", stopChannel)
  if err != nil {
    glog.Fatalf("%v", err)
  }
  nodeUtilizationThresholds := deschedulerapi.NodeResourceUtilizationThresholds{Thresholds: thresholds, TargetThresholds: targetThresholds}
  nodeUtilizationStrategyParams := deschedulerapi.StrategyParameters{NodeResourceUtilizationThresholds: nodeUtilizationThresholds}
  lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{Enabled: true, Params: nodeUtilizationStrategyParams}
  ds := &options.DeschedulerServer{Client: clientset}
  nodePodCount := strategies.InitializeNodePodCount(nodes)
  strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, evictionPolicyGroupVersion, nodes, nodePodCount)
  time.Sleep(10 * time.Second)

  return
}

func TestE2E(t *testing.T) {
  // If we have reached here, the cluster has already been set up and the kubeconfig file
  // should be in the /tmp directory.
  clientSet, err := client.CreateClient("/tmp/admin.conf")
  if err != nil {
    t.Errorf("Error during client creation with %v", err)
  }
  nodeList, err := clientSet.Core().Nodes().List(metav1.ListOptions{})
  if err != nil {
    t.Errorf("Error listing node with %v", err)
  }
  // Assumption: we would have a 3 node cluster by now. Kubeadm brings all the master components
  // onto the master node, so the last node would have the least utilization.
  leastLoadedNode := nodeList.Items[2]
  rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
  _, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc)
  if err != nil {
    t.Errorf("Error creating RC %v", err)
  }
  podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(clientSet, &leastLoadedNode)
  if err != nil {
    t.Errorf("Error listing pods on a node %v", err)
  }
  podsBefore := len(podsOnleastUtilizedNode)
  t.Log("Eviction of pods starting")
  startEndToEndForLowNodeUtilization(clientSet)
  podsOnleastUtilizedNode, err = podutil.ListPodsOnANode(clientSet, &leastLoadedNode)
  if err != nil {
    t.Errorf("Error listing pods on a node %v", err)
  }
  podsAfter := len(podsOnleastUtilizedNode)
  if podsBefore > podsAfter {
    t.Fatalf("We should have seen more pods on this node as per kubeadm's way of installing %v, %v", podsBefore, podsAfter)
  }
}

@@ -1,20 +0,0 @@
#!/bin/bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This just runs the e2e tests.
PRJ_PREFIX="github.com/${REPO_ORG:-kubernetes-incubator}/descheduler"
go test ${PRJ_PREFIX}/test/e2e/ -v

@@ -1,20 +0,0 @@
#!/bin/bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This just runs the unit tests, skipping the vendor/ and test/ directories so that the e2e tests are not run.
PRJ_PREFIX="github.com/${REPO_ORG:-kubernetes-incubator}/descheduler"
go test $(go list ${PRJ_PREFIX}/... | grep -v ${PRJ_PREFIX}/vendor/| grep -v ${PRJ_PREFIX}/test/)

@@ -1,121 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package test

import (
  "fmt"

  "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/api/resource"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// BuildTestPod creates a test pod with given parameters.
func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod {
  pod := &v1.Pod{
    ObjectMeta: metav1.ObjectMeta{
      Namespace: "default",
      Name:      name,
      SelfLink:  fmt.Sprintf("/api/v1/namespaces/default/pods/%s", name),
    },
    Spec: v1.PodSpec{
      Containers: []v1.Container{
        {
          Resources: v1.ResourceRequirements{
            Requests: v1.ResourceList{},
            Limits:   v1.ResourceList{},
          },
        },
      },
      NodeName: nodeName,
    },
  }
  if cpu >= 0 {
    pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
  }
  if memory >= 0 {
    pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
  }

  return pod
}

// GetMirrorPodAnnotation returns the annotation needed for mirror pod.
func GetMirrorPodAnnotation() map[string]string {
  return map[string]string{
    "kubernetes.io/created-by":    "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Pod\"}}",
    "kubernetes.io/config.source": "api",
    "kubernetes.io/config.mirror": "mirror",
  }
}

// GetNormalPodOwnerRefList returns the ownerRef needed for a pod.
func GetNormalPodOwnerRefList() []metav1.OwnerReference {
  ownerRefList := make([]metav1.OwnerReference, 0)
  ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "Pod", APIVersion: "v1"})
  return ownerRefList
}

// GetReplicaSetOwnerRefList returns the ownerRef needed for replicaset pod.
func GetReplicaSetOwnerRefList() []metav1.OwnerReference {
  ownerRefList := make([]metav1.OwnerReference, 0)
  ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "ReplicaSet", APIVersion: "v1"})
  return ownerRefList
}

// GetDaemonSetOwnerRefList returns the ownerRef needed for daemonset pod.
func GetDaemonSetOwnerRefList() []metav1.OwnerReference {
  ownerRefList := make([]metav1.OwnerReference, 0)
  ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "DaemonSet", APIVersion: "v1"})
  return ownerRefList
}

// GetCriticalPodAnnotation returns the annotation needed for critical pod.
func GetCriticalPodAnnotation() map[string]string {
  return map[string]string{
    "kubernetes.io/created-by":                   "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Pod\"}}",
    "scheduler.alpha.kubernetes.io/critical-pod": "",
  }
}

// BuildTestNode creates a node with specified capacity.
func BuildTestNode(name string, millicpu int64, mem int64, pods int64) *v1.Node {
  node := &v1.Node{
    ObjectMeta: metav1.ObjectMeta{
      Name:     name,
      SelfLink: fmt.Sprintf("/api/v1/nodes/%s", name),
      Labels:   map[string]string{},
    },
    Status: v1.NodeStatus{
      Capacity: v1.ResourceList{
        v1.ResourcePods:   *resource.NewQuantity(pods, resource.DecimalSI),
        v1.ResourceCPU:    *resource.NewMilliQuantity(millicpu, resource.DecimalSI),
        v1.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
      },
      Allocatable: v1.ResourceList{
        v1.ResourcePods:   *resource.NewQuantity(pods, resource.DecimalSI),
        v1.ResourceCPU:    *resource.NewMilliQuantity(millicpu, resource.DecimalSI),
        v1.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
      },
      Phase: v1.NodeRunning,
      Conditions: []v1.NodeCondition{
        {Type: v1.NodeReady, Status: v1.ConditionTrue},
      },
    },
  }
  return node
}

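A short sketch of how these helpers combine into a typical unit-test fixture, as the strategy tests above do before wiring the objects into a fake clientset; the `example` package, `buildFixture` helper, and pod names are made up for illustration.

```go
package example

import (
  "github.com/kubernetes-incubator/descheduler/test"
  "k8s.io/api/core/v1"
)

// buildFixture assembles a node with 2000 millicores, 3000 units of memory and
// room for 10 pods, plus two pods bound to it with different owner kinds.
func buildFixture() (*v1.Node, []v1.Pod) {
  node := test.BuildTestNode("node-a", 2000, 3000, 10)

  bare := test.BuildTestPod("bare-pod", 100, 0, node.Name)
  bare.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

  replicated := test.BuildTestPod("replicated-pod", 100, 0, node.Name)
  replicated.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()

  return node, []v1.Pod{*bare, *replicated}
}
```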
11 vendor/cloud.google.com/go/.travis.yml generated vendored
@@ -1,11 +0,0 @@
sudo: false
language: go
go:
- 1.6
- 1.7
install:
- go get -v cloud.google.com/go/...
script:
- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
  go test -race -v cloud.google.com/go/...

15 vendor/cloud.google.com/go/AUTHORS generated vendored
@@ -1,15 +0,0 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.

Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>

126 vendor/cloud.google.com/go/CONTRIBUTING.md generated vendored
(deleted: the gocloud contribution guide, including integration-test setup notes, CLA instructions, and the contributor code of conduct)

34 vendor/cloud.google.com/go/CONTRIBUTORS generated vendored
@@ -1,34 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>

# Keep the list alphabetically sorted.

Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>

202 vendor/cloud.google.com/go/LICENSE generated vendored
(deleted: the full Apache License, Version 2.0 text)

245
vendor/cloud.google.com/go/README.md
generated
vendored
245
vendor/cloud.google.com/go/README.md
generated
vendored
@@ -1,245 +0,0 @@
# Google Cloud for Go

[](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go)
[](https://godoc.org/cloud.google.com/go)

``` go
import "cloud.google.com/go"
```

Go packages for Google Cloud Platform services.

**NOTE:** These packages are under development, and may occasionally make
backwards-incompatible changes.

**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).

## News

_September 8, 2016_

* New clients for some of Google's Machine Learning APIs: Vision, Speech, and
  Natural Language.

* Preview version of a new [Stackdriver Logging][cloud-logging] client in
  [`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
  This client uses gRPC as its transport layer, and supports log reading, sinks
  and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.

## Supported APIs

Google API | Status | Package
-------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Pub/Sub][cloud-pubsub] | experimental | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | experimental | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Logging][cloud-logging] | experimental | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Vision][cloud-vision] | experimental | [`cloud.google.com/go/vision`][cloud-vision-ref]
[Language][cloud-language] | experimental | [`cloud.google.com/go/language/apiv1beta1`][cloud-language-ref]
[Speech][cloud-speech] | experimental | [`cloud.google.com/go/speech/apiv1beta`][cloud-speech-ref]

> **Experimental status**: the API is still being actively developed. As a
> result, it might change in backward-incompatible ways and is not recommended
> for production use.
>
> **Beta status**: the API is largely complete, but still has outstanding
> features and bugs to be addressed. There may be minor backwards-incompatible
> changes where necessary.
>
> **Stable status**: the API is mature and ready for production use. We will
> continue addressing bugs and feature requests.

Documentation and examples are available at
https://godoc.org/cloud.google.com/go

Visit or join the
[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce)
for updates on these packages.

## Go Versions Supported

We support the two most recent major versions of Go. If Google App Engine uses
an older version, we support that as well. You can see which versions are
currently supported by looking at the lines following `go:` in
[`.travis.yml`](.travis.yml).

## Authorization

By default, each API will use [Google Application Default Credentials][default-creds]
for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration.

Manually-configured authorization can be achieved using the
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
create an `oauth2.TokenSource`. This token source can be passed to the `NewClient`
function for the relevant API using a
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
option.
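
A minimal sketch of that pattern (not part of the original README; the token value and project ID are placeholders, and the datastore client is used only because it appears in the examples below). It assumes imports of `golang.org/x/oauth2`, `cloud.google.com/go/datastore`, and `google.golang.org/api/option`:

```go
ctx := context.Background()
// Any oauth2.TokenSource works here; a static token keeps the sketch short.
ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-access-token"})
// Pass the token source to the client constructor via option.WithTokenSource.
client, err := datastore.NewClient(ctx, "my-project-id", option.WithTokenSource(ts))
if err != nil {
	// TODO: handle error.
}
_ = client
```
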
## Google Cloud Datastore [](https://godoc.org/cloud.google.com/go/datastore)

[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully-
managed, schemaless database for storing non-relational data. Cloud Datastore
automatically scales with your users and supports ACID transactions, high availability
of reads and writes, strong consistency for reads and ancestor queries, and eventual
consistency for all other queries.

Follow the [activation instructions][cloud-datastore-activation] to use the Google
Cloud Datastore API with your project.

First create a `datastore.Client` to use throughout your application:

```go
client, err := datastore.NewClient(ctx, "my-project-id")
if err != nil {
    log.Fatalln(err)
}
```

Then use that client to interact with the API:

```go
type Post struct {
    Title       string
    Body        string `datastore:",noindex"`
    PublishedAt time.Time
}
keys := []*datastore.Key{
    datastore.NewKey(ctx, "Post", "post1", 0, nil),
    datastore.NewKey(ctx, "Post", "post2", 0, nil),
}
posts := []*Post{
    {Title: "Post 1", Body: "...", PublishedAt: time.Now()},
    {Title: "Post 2", Body: "...", PublishedAt: time.Now()},
}
if _, err := client.PutMulti(ctx, keys, posts); err != nil {
    log.Fatal(err)
}
```

## Google Cloud Storage [](https://godoc.org/cloud.google.com/go/storage)

[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store
data on Google infrastructure with very high reliability, performance and availability,
and can be used to distribute large data objects to users via direct download.

https://godoc.org/cloud.google.com/go/storage

First create a `storage.Client` to use throughout your application:

```go
client, err := storage.NewClient(ctx)
if err != nil {
    log.Fatal(err)
}
```

```go
// Read the object1 from bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
if err != nil {
    log.Fatal(err)
}
defer rc.Close()
body, err := ioutil.ReadAll(rc)
if err != nil {
    log.Fatal(err)
}
```

## Google Cloud Pub/Sub [](https://godoc.org/cloud.google.com/go/pubsub)

[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect
your services with reliable, many-to-many, asynchronous messaging hosted on Google's
infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation
for building your own robust, global services.

First create a `pubsub.Client` to use throughout your application:

```go
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
    log.Fatal(err)
}
```

```go
// Publish "hello world" on topic1.
topic := client.Topic("topic1")
msgIDs, err := topic.Publish(ctx, &pubsub.Message{
    Data: []byte("hello world"),
})
if err != nil {
    log.Fatal(err)
}

// Create an iterator to pull messages via subscription1.
it, err := client.Subscription("subscription1").Pull(ctx)
if err != nil {
    log.Println(err)
}
defer it.Stop()

// Consume N messages from the iterator.
for i := 0; i < N; i++ {
    msg, err := it.Next()
    if err == pubsub.Done {
        break
    }
    if err != nil {
        log.Fatalf("Failed to retrieve message: %v", err)
    }

    fmt.Printf("Message %d: %s\n", i, msg.Data)
    msg.Done(true) // Acknowledge that we've consumed the message.
}
```

## Contributing

Contributions are welcome. Please, see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
document for details. We're using Gerrit for our code reviews. Please don't open pull
requests against this repo, new pull requests will be automatically closed.

Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms.
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.

[cloud-datastore]: https://cloud.google.com/datastore/
[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate

[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs

[cloud-storage]: https://cloud.google.com/storage/
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets

[cloud-bigtable]: https://cloud.google.com/bigtable/
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable

[cloud-bigquery]: https://cloud.google.com/bigquery/
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery

[cloud-logging]: https://cloud.google.com/logging/
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging

[cloud-vision]: https://cloud.google.com/vision/
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision

[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1beta1

[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1

[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
26 vendor/cloud.google.com/go/appveyor.yml generated vendored
@@ -1,26 +0,0 @@
# This file configures AppVeyor (http://www.appveyor.com),
# a Windows-based CI service similar to Travis.

# Identifier for this run
version: "{build}"

# Clone the repo into this path, which conforms to the standard
# Go workspace structure.
clone_folder: c:\gopath\src\cloud.google.com\go

environment:
  GOPATH: c:\gopath

install:
  # Info for debugging.
  - echo %PATH%
  - go version
  - go env
  - go get -v -d -t ./...

# Provide a build script, or AppVeyor will call msbuild.
build_script:
  - go install -v ./...

test_script:
  - go test -short -v ./...
60 vendor/cloud.google.com/go/authexample_test.go generated vendored
@@ -1,60 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloud_test

import (
    "cloud.google.com/go/datastore"
    "golang.org/x/net/context"
    "google.golang.org/api/option"
)

func Example_applicationDefaultCredentials() {
    ctx := context.Background()
    // Use Google Application Default Credentials to authorize and authenticate the client.
    // More information about Application Default Credentials and how to enable is at
    // https://developers.google.com/identity/protocols/application-default-credentials.
    //
    // This is the recommended way of authorizing and authenticating.
    //
    // Note: The example uses the datastore client, but the same steps apply to
    // the other client libraries underneath this package.
    client, err := datastore.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: handle error.
    }
    // Use the client.
    _ = client
}

func Example_serviceAccountFile() {
    // Warning: The better way to use service accounts is to set GOOGLE_APPLICATION_CREDENTIALS
    // and use the Application Default Credentials.
    ctx := context.Background()
    // Use a JSON key file associated with a Google service account to
    // authenticate and authorize.
    // Go to https://console.developers.google.com/permissions/serviceaccounts to create
    // and download a service account key for your project.
    //
    // Note: The example uses the datastore client, but the same steps apply to
    // the other client libraries underneath this package.
    client, err := datastore.NewClient(ctx,
        "project-id",
        option.WithServiceAccountFile("/path/to/service-account-key.json"))
    if err != nil {
        // TODO: handle error.
    }
    // Use the client.
    _ = client
}
175 vendor/cloud.google.com/go/bigquery/bigquery.go generated vendored
@@ -1,175 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

// TODO(mcgreevy): support dry-run mode when creating jobs.

import (
    "fmt"

    "google.golang.org/api/option"
    "google.golang.org/api/transport"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

const prodAddr = "https://www.googleapis.com/bigquery/v2/"

// A Source is a source of data for the Copy function.
type Source interface {
    implementsSource()
}

// A Destination is a destination of data for the Copy function.
type Destination interface {
    implementsDestination()
}

// An Option is an optional argument to Copy.
type Option interface {
    implementsOption()
}

// A ReadSource is a source of data for the Read function.
type ReadSource interface {
    implementsReadSource()
}

// A ReadOption is an optional argument to Read.
type ReadOption interface {
    customizeRead(conf *pagingConf)
}

const Scope = "https://www.googleapis.com/auth/bigquery"
const userAgent = "gcloud-golang-bigquery/20160429"

// Client may be used to perform BigQuery operations.
type Client struct {
    service   service
    projectID string
}

// NewClient constructs a new Client which can perform BigQuery operations.
// Operations performed via the client are billed to the specified GCP project.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
    o := []option.ClientOption{
        option.WithEndpoint(prodAddr),
        option.WithScopes(Scope),
        option.WithUserAgent(userAgent),
    }
    o = append(o, opts...)
    httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
    if err != nil {
        return nil, fmt.Errorf("dialing: %v", err)
    }

    s, err := newBigqueryService(httpClient, endpoint)
    if err != nil {
        return nil, fmt.Errorf("constructing bigquery client: %v", err)
    }

    c := &Client{
        service:   s,
        projectID: projectID,
    }
    return c, nil
}

// initJobProto creates and returns a bigquery Job proto.
// The proto is customized using any jobOptions in options.
// The list of Options is returned with the jobOptions removed.
func initJobProto(projectID string, options []Option) (*bq.Job, []Option) {
    job := &bq.Job{}

    var other []Option
    for _, opt := range options {
        if o, ok := opt.(jobOption); ok {
            o.customizeJob(job, projectID)
        } else {
            other = append(other, opt)
        }
    }
    return job, other
}

// Copy starts a BigQuery operation to copy data from a Source to a Destination.
func (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error) {
    switch dst := dst.(type) {
    case *Table:
        switch src := src.(type) {
        case *GCSReference:
            return c.load(ctx, dst, src, options)
        case *Table:
            return c.cp(ctx, dst, Tables{src}, options)
        case Tables:
            return c.cp(ctx, dst, src, options)
        case *Query:
            return c.query(ctx, dst, src, options)
        }
    case *GCSReference:
        if src, ok := src.(*Table); ok {
            return c.extract(ctx, dst, src, options)
        }
    }
    return nil, fmt.Errorf("no Copy operation matches dst/src pair: dst: %T ; src: %T", dst, src)
}

// Query creates a query with string q. You may optionally set
// DefaultProjectID and DefaultDatasetID on the returned query before using it.
func (c *Client) Query(q string) *Query {
    return &Query{Q: q, client: c}
}

// Read submits a query for execution and returns the results via an Iterator.
//
// Read uses a temporary table to hold the results of the query job.
//
// For more control over how a query is performed, don't use this method but
// instead pass the Query as a Source to Client.Copy, and call Read on the
// resulting Job.
func (q *Query) Read(ctx context.Context, options ...ReadOption) (*Iterator, error) {
    dest := &Table{}
    job, err := q.client.Copy(ctx, dest, q, WriteTruncate)
    if err != nil {
        return nil, err
    }
    return job.Read(ctx, options...)
}

// executeQuery submits a query for execution and returns the results via an Iterator.
func (c *Client) executeQuery(ctx context.Context, q *Query, options ...ReadOption) (*Iterator, error) {
    dest := &Table{}
    job, err := c.Copy(ctx, dest, q, WriteTruncate)
    if err != nil {
        return nil, err
    }

    return c.Read(ctx, job, options...)
}

// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
    return c.DatasetInProject(c.projectID, id)
}

// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
    return &Dataset{
        projectID: projectID,
        id:        datasetID,
        service:   c.service,
    }
}
47 vendor/cloud.google.com/go/bigquery/copy_op.go generated vendored
@@ -1,47 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "fmt"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

type copyOption interface {
    customizeCopy(conf *bq.JobConfigurationTableCopy)
}

func (c *Client) cp(ctx context.Context, dst *Table, src Tables, options []Option) (*Job, error) {
    job, options := initJobProto(c.projectID, options)
    payload := &bq.JobConfigurationTableCopy{}

    dst.customizeCopyDst(payload)
    src.customizeCopySrc(payload)

    for _, opt := range options {
        o, ok := opt.(copyOption)
        if !ok {
            return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
        }
        o.customizeCopy(payload)
    }

    job.Configuration = &bq.JobConfiguration{
        Copy: payload,
    }
    return c.service.insertJob(ctx, job, c.projectID)
}
104 vendor/cloud.google.com/go/bigquery/copy_test.go generated vendored
@@ -1,104 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "reflect"
    "testing"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

func defaultCopyJob() *bq.Job {
    return &bq.Job{
        Configuration: &bq.JobConfiguration{
            Copy: &bq.JobConfigurationTableCopy{
                DestinationTable: &bq.TableReference{
                    ProjectId: "d-project-id",
                    DatasetId: "d-dataset-id",
                    TableId:   "d-table-id",
                },
                SourceTables: []*bq.TableReference{
                    {
                        ProjectId: "s-project-id",
                        DatasetId: "s-dataset-id",
                        TableId:   "s-table-id",
                    },
                },
            },
        },
    }
}

func TestCopy(t *testing.T) {
    testCases := []struct {
        dst     *Table
        src     Tables
        options []Option
        want    *bq.Job
    }{
        {
            dst: &Table{
                ProjectID: "d-project-id",
                DatasetID: "d-dataset-id",
                TableID:   "d-table-id",
            },
            src: Tables{
                {
                    ProjectID: "s-project-id",
                    DatasetID: "s-dataset-id",
                    TableID:   "s-table-id",
                },
            },
            want: defaultCopyJob(),
        },
        {
            dst: &Table{
                ProjectID: "d-project-id",
                DatasetID: "d-dataset-id",
                TableID:   "d-table-id",
            },
            src: Tables{
                {
                    ProjectID: "s-project-id",
                    DatasetID: "s-dataset-id",
                    TableID:   "s-table-id",
                },
            },
            options: []Option{CreateNever, WriteTruncate},
            want: func() *bq.Job {
                j := defaultCopyJob()
                j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
                j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
                return j
            }(),
        },
    }

    for _, tc := range testCases {
        s := &testService{}
        c := &Client{
            service: s,
        }
        if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
            t.Errorf("err calling cp: %v", err)
            continue
        }
        if !reflect.DeepEqual(s.Job, tc.want) {
            t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
        }
    }
}
79 vendor/cloud.google.com/go/bigquery/create_table_test.go generated vendored
@@ -1,79 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "reflect"
    "testing"
    "time"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

type createTableRecorder struct {
    conf *createTableConf
    service
}

func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error {
    rec.conf = conf
    return nil
}

func TestCreateTableOptions(t *testing.T) {
    s := &createTableRecorder{}
    c := &Client{
        projectID: "p",
        service:   s,
    }
    ds := c.Dataset("d")
    table := ds.Table("t")
    exp := time.Now()
    q := "query"
    if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q)); err != nil {
        t.Fatalf("err calling Table.Create: %v", err)
    }
    want := createTableConf{
        projectID:  "p",
        datasetID:  "d",
        tableID:    "t",
        expiration: exp,
        viewQuery:  q,
    }
    if !reflect.DeepEqual(*s.conf, want) {
        t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
    }

    sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
    if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil {
        t.Fatalf("err calling Table.Create: %v", err)
    }
    want = createTableConf{
        projectID:  "p",
        datasetID:  "d",
        tableID:    "t",
        expiration: exp,
        // No need for an elaborate schema, that is tested in schema_test.go.
        schema: &bq.TableSchema{
            Fields: []*bq.TableFieldSchema{
                bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
            },
        },
    }
    if !reflect.DeepEqual(*s.conf, want) {
        t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
    }
}
55 vendor/cloud.google.com/go/bigquery/dataset.go generated vendored
@@ -1,55 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import "golang.org/x/net/context"

// Dataset is a reference to a BigQuery dataset.
type Dataset struct {
    projectID string
    id        string
    service   service
}

// ListTables returns a list of all the tables contained in the Dataset.
func (d *Dataset) ListTables(ctx context.Context) ([]*Table, error) {
    var tables []*Table

    err := getPages("", func(pageToken string) (string, error) {
        ts, tok, err := d.service.listTables(ctx, d.projectID, d.id, pageToken)
        if err == nil {
            tables = append(tables, ts...)
        }
        return tok, err
    })

    if err != nil {
        return nil, err
    }
    return tables, nil
}

// Create creates a dataset in the BigQuery service. An error will be returned
// if the dataset already exists.
func (d *Dataset) Create(ctx context.Context) error {
    return d.service.insertDataset(ctx, d.id, d.projectID)
}

// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
func (d *Dataset) Table(tableID string) *Table {
    return &Table{ProjectID: d.projectID, DatasetID: d.id, TableID: tableID, service: d.service}
}
105 vendor/cloud.google.com/go/bigquery/dataset_test.go generated vendored
@@ -1,105 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "reflect"
    "testing"

    "golang.org/x/net/context"
)

// readServiceStub services read requests by returning data from an in-memory list of values.
type listTablesServiceStub struct {
    expectedProject, expectedDataset string
    values                           [][]*Table        // contains pages of tables.
    pageTokens                       map[string]string // maps incoming page token to returned page token.

    service
}

func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
    if projectID != s.expectedProject {
        return nil, "", errors.New("wrong project id")
    }
    if datasetID != s.expectedDataset {
        return nil, "", errors.New("wrong dataset id")
    }

    tables := s.values[0]
    s.values = s.values[1:]
    return tables, s.pageTokens[pageToken], nil
}

func TestListTables(t *testing.T) {
    t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
    t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
    t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
    testCases := []struct {
        data       [][]*Table
        pageTokens map[string]string
        want       []*Table
    }{
        {
            data:       [][]*Table{{t1, t2}, {t3}},
            pageTokens: map[string]string{"": "a", "a": ""},
            want:       []*Table{t1, t2, t3},
        },
        {
            data:       [][]*Table{{t1, t2}, {t3}},
            pageTokens: map[string]string{"": ""}, // no more pages after first one.
            want:       []*Table{t1, t2},
        },
    }

    for _, tc := range testCases {
        c := &Client{
            service: &listTablesServiceStub{
                expectedProject: "x",
                expectedDataset: "y",
                values:          tc.data,
                pageTokens:      tc.pageTokens,
            },
            projectID: "x",
        }
        got, err := c.Dataset("y").ListTables(context.Background())
        if err != nil {
            t.Errorf("err calling ListTables: %v", err)
            continue
        }

        if !reflect.DeepEqual(got, tc.want) {
            t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
        }
    }
}

func TestListTablesError(t *testing.T) {
    c := &Client{
        service: &listTablesServiceStub{
            expectedProject: "x",
            expectedDataset: "y",
        },
        projectID: "x",
    }
    // Test that service read errors are propagated back to the caller.
    // Passing "not y" as the dataset id will cause the service to return an error.
    _, err := c.Dataset("not y").ListTables(context.Background())
    if err == nil {
        // Read should not return an error; only Err should.
        t.Errorf("ListTables expected: non-nil err, got: nil")
    }
}
18 vendor/cloud.google.com/go/bigquery/doc.go generated vendored
@@ -1,18 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package bigquery provides a client for the BigQuery service.
//
// Note: This package is a work-in-progress. Backwards-incompatible changes should be expected.
package bigquery // import "cloud.google.com/go/bigquery"
82 vendor/cloud.google.com/go/bigquery/error.go generated vendored
@@ -1,82 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "fmt"

    bq "google.golang.org/api/bigquery/v2"
)

// An Error contains detailed information about a failed bigquery operation.
type Error struct {
    // Mirrors bq.ErrorProto, but drops DebugInfo
    Location, Message, Reason string
}

func (e Error) Error() string {
    return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
}

func errorFromErrorProto(ep *bq.ErrorProto) *Error {
    if ep == nil {
        return nil
    }
    return &Error{
        Location: ep.Location,
        Message:  ep.Message,
        Reason:   ep.Reason,
    }
}

// A MultiError contains multiple related errors.
type MultiError []error

func (m MultiError) Error() string {
    switch len(m) {
    case 0:
        return "(0 errors)"
    case 1:
        return m[0].Error()
    case 2:
        return m[0].Error() + " (and 1 other error)"
    }
    return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1)
}

// RowInsertionError contains all errors that occurred when attempting to insert a row.
type RowInsertionError struct {
    InsertID string // The InsertID associated with the affected row.
    RowIndex int    // The 0-based index of the affected row in the batch of rows being inserted.
    Errors   MultiError
}

func (e *RowInsertionError) Error() string {
    errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s"
    return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error())
}

// PutMultiError contains an error for each row which was not successfully inserted
// into a BigQuery table.
type PutMultiError []RowInsertionError

func (pme PutMultiError) Error() string {
    plural := "s"
    if len(pme) == 1 {
        plural = ""
    }

    return fmt.Sprintf("%v row insertion%s failed", len(pme), plural)
}
109 vendor/cloud.google.com/go/bigquery/error_test.go generated vendored
@@ -1,109 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "reflect"
    "strings"
    "testing"

    bq "google.golang.org/api/bigquery/v2"
)

func rowInsertionError(msg string) RowInsertionError {
    return RowInsertionError{Errors: []error{errors.New(msg)}}
}

func TestPutMultiErrorString(t *testing.T) {
    testCases := []struct {
        errs PutMultiError
        want string
    }{
        {
            errs: PutMultiError{},
            want: "0 row insertions failed",
        },
        {
            errs: PutMultiError{rowInsertionError("a")},
            want: "1 row insertion failed",
        },
        {
            errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")},
            want: "2 row insertions failed",
        },
    }

    for _, tc := range testCases {
        if tc.errs.Error() != tc.want {
            t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
        }
    }
}

func TestMultiErrorString(t *testing.T) {
    testCases := []struct {
        errs MultiError
        want string
    }{
        {
            errs: MultiError{},
            want: "(0 errors)",
        },
        {
            errs: MultiError{errors.New("a")},
            want: "a",
        },
        {
            errs: MultiError{errors.New("a"), errors.New("b")},
            want: "a (and 1 other error)",
        },
        {
            errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")},
            want: "a (and 2 other errors)",
        },
    }

    for _, tc := range testCases {
        if tc.errs.Error() != tc.want {
            t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
        }
    }
}

func TestErrorFromErrorProto(t *testing.T) {
    for _, test := range []struct {
        in   *bq.ErrorProto
        want *Error
    }{
        {nil, nil},
        {
            in:   &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"},
            want: &Error{Location: "L", Message: "M", Reason: "R"},
        },
    } {
        if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) {
            t.Errorf("%v: got %v, want %v", test.in, got, test.want)
        }
    }
}

func TestErrorString(t *testing.T) {
    e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"}
    got := e.Error()
    if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") {
        t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got)
    }
}
Some files were not shown because too many files have changed in this diff.