mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 13:29:11 +01:00

Compare commits


187 Commits

Author SHA1 Message Date
a7i
782058a67c Update index.yaml
Signed-off-by: a7i <a7i@users.noreply.github.com>
2025-10-30 16:41:34 +00:00
a7i
6ecff2d544 Update index.yaml
Signed-off-by: a7i <a7i@users.noreply.github.com>
2025-05-04 19:49:46 +00:00
k8s-ci-robot
241e3c50f7 Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2025-02-11 04:46:21 +00:00
k8s-ci-robot
345e1f7139 Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2025-01-06 23:08:49 +00:00
a7i
818033a143 Update index.yaml
Signed-off-by: a7i <a7i@users.noreply.github.com>
2025-01-02 23:34:03 +00:00
k8s-ci-robot
54540412b8 Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2024-11-20 15:09:49 +00:00
a7i
e91082a4e9 Update index.yaml
Signed-off-by: a7i <a7i@users.noreply.github.com>
2024-09-09 22:59:21 +00:00
k8s-ci-robot
74150670ed Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2024-06-05 12:06:23 +00:00
a7i
8662aa75b1 Update index.yaml
Signed-off-by: a7i <a7i@users.noreply.github.com>
2024-05-20 14:03:13 +00:00
a7i
25876a2d2d Update index.yaml
Signed-off-by: a7i <a7i@users.noreply.github.com>
2024-01-02 18:54:21 +00:00
k8s-ci-robot
28a3011f77 Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2023-11-29 17:22:04 +00:00
a7i
751a0dec8e Update index.yaml
Signed-off-by: a7i <a7i@users.noreply.github.com>
2023-08-24 13:07:51 +00:00
k8s-ci-robot
932bf0bb42 Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2023-05-31 08:18:14 +00:00
a7i
da42d43c2f Update index.yaml
Signed-off-by: a7i <a7i@users.noreply.github.com>
2023-05-05 14:14:44 +00:00
k8s-ci-robot
39066a41de Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2023-04-04 17:34:17 +00:00
damemi
640d4af538 Update index.yaml
Signed-off-by: damemi <damemi@users.noreply.github.com>
2023-01-17 14:42:00 +00:00
k8s-ci-robot
fe135ef841 Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2022-10-17 21:53:53 +00:00
k8s-ci-robot
df30be9f04 Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2022-09-27 15:02:20 +00:00
damemi
6dcb094ada Update index.yaml
Signed-off-by: damemi <damemi@users.noreply.github.com>
2022-09-15 16:33:37 +00:00
k8s-ci-robot
8849b415fa Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2022-05-31 13:39:20 +00:00
damemi
3d6c3c82ae Update index.yaml
Signed-off-by: damemi <damemi@users.noreply.github.com>
2022-05-23 14:14:10 +00:00
k8s-ci-robot
245b8c0f84 Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2022-02-28 18:56:10 +00:00
k8s-ci-robot
f6ef3370cd Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2022-02-09 13:34:50 +00:00
k8s-ci-robot
6bdefcbeff Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2022-02-03 20:23:32 +00:00
k8s-ci-robot
9f3411a93e Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2021-09-29 16:37:16 +00:00
k8s-ci-robot
6f8585493e Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2021-09-08 20:44:21 +00:00
damemi
2213cb977c Update index.yaml
Signed-off-by: damemi <damemi@users.noreply.github.com>
2021-06-08 17:43:48 +00:00
damemi
bf052b2fed Update index.yaml
Signed-off-by: damemi <damemi@users.noreply.github.com>
2020-12-10 15:23:17 +00:00
Kubernetes Prow Robot
e372b00b9a Merge pull request #460 from damemi/empty-gh-pages
Delete everything but index.yaml from gh-pages
2020-12-09 06:40:50 -08:00
Mike Dame
1795479cf0 Delete everything but index.yaml from gh-pages 2020-12-08 20:53:05 -05:00
damemi
97a0ab922f Update index.yaml
Signed-off-by: damemi <damemi@users.noreply.github.com>
2020-12-09 01:47:49 +00:00
k8s-ci-robot
81df2c3da8 Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2020-12-08 19:34:03 +00:00
k8s-ci-robot
9e35349cb3 Update index.yaml
Signed-off-by: k8s-ci-robot <k8s-ci-robot@users.noreply.github.com>
2020-12-08 19:22:08 +00:00
damemi
6e1f88b932 Update index.yaml
Signed-off-by: damemi <damemi@users.noreply.github.com>
2020-09-01 16:59:23 +00:00
damemi
a14d4bfba2 Update index.yaml
Signed-off-by: damemi <damemi@users.noreply.github.com>
2020-07-23 18:29:33 +00:00
Kubernetes Prow Robot
0894f7740c Merge pull request #343 from ingvagabund/test-cleanup
Clean e2e test so it's easier to extend it
2020-07-21 05:21:13 -07:00
Kubernetes Prow Robot
c3f07dc366 Merge pull request #351 from KohlsTechnology/helm-release-docs
Update Release Documentation For Helm Charts
2020-07-20 20:33:14 -07:00
Sean Malloy
ca8f1051eb Update Helm Chart Version To 0.18.0 2020-07-20 12:52:38 -05:00
Sean Malloy
c7692a2e9f Update Release Documentation For Helm Charts
The descheduler now has an official helm chart. This change documents the
process to release a new helm chart artifact.
2020-07-20 12:51:50 -05:00
Jan Chaloupka
53badf7b61 e2e: create dedicated ns for each e2e test 2020-07-17 18:15:53 +02:00
Jan Chaloupka
f801c5f72f e2e: code cleanup 2020-07-17 18:15:13 +02:00
Jan Chaloupka
327880ba51 e2e: separate tests for LowNodeUtilization strategy and evict annotation 2020-07-17 18:11:01 +02:00
Jan Chaloupka
7bb8b4feda e2e: create and delete RC in the same function body
So that create and delete are coupled, and the caller of create is responsible for calling delete as well
2020-07-17 18:06:19 +02:00
Kubernetes Prow Robot
36b1e1f061 Merge pull request #298 from stevehipwell/helm-chart
Add Helm chart
2020-07-16 16:15:00 -07:00
Steve Hipwell
4507a90bb6 Add helm chart 2020-07-16 17:36:17 +01:00
Kubernetes Prow Robot
550f68306c Merge pull request #342 from KohlsTechnology/remove-travis-ci-config
Remove Travis CI Configuration
2020-07-16 07:05:41 -07:00
Sean Malloy
2b668566ce Remove Travis CI Configuration
The e2e tests are now being run through Prow. Therefore the Travis CI
configuration can now be completely removed.
2020-07-15 23:52:27 -05:00
Kubernetes Prow Robot
ee414ea366 Merge pull request #344 from ingvagabund/kind-have-version-configurable-through-env
e2e: have k8s version configurable when creating cluster through kind
2020-07-14 06:47:21 -07:00
Jan Chaloupka
5761b5d595 e2e: have k8s version configurable when creating cluster through kind 2020-07-14 15:25:02 +02:00
Kubernetes Prow Robot
07f476dfc4 Merge pull request #340 from damemi/update-e2e
Update e2e script to use Kind setup
2020-07-13 19:57:20 -07:00
Mike Dame
f5e9f07321 Move kind setup to e2e script
This moves the kind setup (previously used by Travis) to the e2e runner script
to accommodate the switch to Prow. This provides a KIND_E2E env var to specify
whether to run the tests in kind or (by default) to run them locally.
2020-07-13 15:49:41 -04:00
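A quick usage sketch of the env var described above (the exact flag handling inside the script is assumed):

```sh
# Run the e2e suite inside a kind cluster (hypothetical invocation).
KIND_E2E=true make test-e2e

# Default: run the tests locally against the current kubeconfig.
make test-e2e
```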
Kubernetes Prow Robot
05c69ee26a Merge pull request #336 from lixiang233/avoid_duplicated_append_RemoveDuplicatePods
avoid appending list multiple times in RemoveDuplicates
2020-07-09 01:32:02 -07:00
Kubernetes Prow Robot
1623e09122 Merge pull request #332 from lixiang233/fix_test_struct_lownodeutilization
Add maxPodsToEvictPerNode to LowNodeUtilization testcase struct
2020-07-09 01:22:02 -07:00
lixiang
696aa7c505 rename maxPodsToEvict to maxPodsToEvictPerNode to avoid misunderstanding 2020-07-08 15:15:48 +08:00
lixiang
c53dce0805 Add maxPodsToEvictPerNode to testcase struct 2020-07-08 15:15:38 +08:00
lixiang
cd8b5a0354 avoid appending list multiple times in RemoveDuplicates 2020-07-07 14:42:53 +08:00
Kubernetes Prow Robot
b51f24eb8e Merge pull request #333 from lixiang233/refactor_lowNodeUtilization_func
Support only one sorting strategy in lowNodeUtilization
2020-07-02 22:48:47 -07:00
lixiang
ae3b4368ee support only one sorting strategy in lowNodeUtilization 2020-07-01 09:47:35 +08:00
Kubernetes Prow Robot
61eef93618 Merge pull request #330 from paulfantom/patch-1
examples: fix typo
2020-06-29 07:27:24 -07:00
Paweł Krupa
fa0a2ec6fe examples: fix typo
Fix incorrect example causing the following error at runtime:

PodsHavingTooManyRestarts thresholds not set
2020-06-28 14:28:41 +02:00
Kubernetes Prow Robot
7ece10a643 Merge pull request #328 from KohlsTechnology/go114
Update To Go 1.14.4
2020-06-25 08:26:39 -07:00
Sean Malloy
71c8eae47e Update To Go 1.14.4
The Kubernetes project has been updated to use Go 1.14.4. See the pull
request below.

https://github.com/kubernetes/kubernetes/pull/88638

After making the updates to Go 1.14 "make gen" no longer worked. The
file hack/tools.go had to be created to get "make gen" working with Go
1.14.
2020-06-23 21:33:28 -05:00
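The commit above does not show the new file, but a minimal sketch of the conventional Go 1.14-era `hack/tools.go` pattern it references (the blank import shown is an assumption, not necessarily the dependency the project pins):

```go
// +build tools

// Package tools blank-imports build-time dependencies so that
// "go mod tidy" does not prune them from go.mod.
package tools

import (
	_ "k8s.io/code-generator" // illustrative; the real file may pin other generators
)
```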
Kubernetes Prow Robot
267b0837dc Merge pull request #327 from farah/farah/klog-migration
Update klog to v2
2020-06-23 08:35:40 -07:00
Kubernetes Prow Robot
c713537d56 Merge pull request #322 from lixiang233/move_pod_sort
Move sortPodsBasedOnPriority to pod util
2020-06-22 12:21:40 -07:00
Kubernetes Prow Robot
e374229707 Merge pull request #325 from KohlsTechnology/issue-template
Add initial GitHub issue templates
2020-06-22 07:42:40 -07:00
Sean Malloy
f834581a8e Fix Spelling Error 2020-06-22 09:12:22 -05:00
Ali Farah
15fcde5229 Update klog to v2 2020-06-22 20:43:22 +10:00
Sean Malloy
96c5dd3941 Add initial GitHub issue templates
Initially users can open a bug report, feature request, or misc request.
Bug reports and feature requests will have the "kind" label
automatically added.
2020-06-20 00:52:23 -05:00
lixiang
65a03e76bf Add sorting to RemovePodsViolatingInterPodAntiAffinity 2020-06-19 14:11:23 +08:00
lixiang
43525f6493 Move sortPodsBasedOnPriority to podutil 2020-06-16 19:19:03 +08:00
lixiang
7457626f62 Move helper funcs to testutil 2020-06-16 19:17:28 +08:00
Kubernetes Prow Robot
08c22e8921 Merge pull request #321 from KohlsTechnology/add-event-reason
Add Pod Eviction Reason To Events
2020-06-15 08:53:57 -07:00
Sean Malloy
4ff533ec17 Add pod eviction reason to k8s events
Prior to this change the event created for every pod eviction was
identical. Instead leverage the newly added eviction reason when
creating k8s events. This makes it easier for end users to understand
why the descheduler evicted a pod when inspecting k8s events.
2020-06-10 01:13:17 -05:00
Sean Malloy
7680e3d079 Use var declaration instead of short assignment
This is a very minor refactor to use a var declaration for the reason
variable. A var declaration is being used because the zero value for
strings is an empty string.

https://golang.org/ref/spec#The_zero_value
2020-06-10 01:05:05 -05:00
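A minimal, self-contained illustration of the zero-value point made above (variable names are hypothetical):

```go
package main

import "fmt"

func main() {
	// A var declaration makes it explicit that reason starts as the
	// string zero value "" -- no short assignment to "" is needed.
	var reason string

	if evictionFailed := true; evictionFailed {
		reason = "eviction failed"
	}
	fmt.Println(reason)
}
```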
Kubernetes Prow Robot
305801dd0e Merge pull request #319 from damemi/remove-extra-eviction-log
Remove redundant eviction log message and add "Reason" param to EvictPod
2020-06-09 11:25:19 -07:00
Mike Dame
9951b85d60 Add optional reason parameter to EvictPod 2020-06-09 13:35:51 -04:00
Mike Dame
ff21ec9432 Remove redundant eviction log message 2020-06-09 12:17:45 -04:00
Kubernetes Prow Robot
5e15d77bf2 Merge pull request #309 from damemi/change-evict-local-storage-pods
Make evictLocalStoragePods a property of PodEvictor
2020-06-08 03:53:46 -07:00
Kubernetes Prow Robot
d833c73fc4 Merge pull request #317 from ingvagabund/extend-owners
Add myself to project reviewers
2020-06-05 10:45:44 -07:00
Kubernetes Prow Robot
795a80dfb0 Merge pull request #310 from lixiang233/ignore_no_evictable_pod_node
Skip evicting when no evictable pod found on node
2020-06-05 07:41:44 -07:00
Mike Dame
f5e4acdd8a Make evictLocalStoragePods a property of PodEvictor
This is a minor refactor of PodEvictor to include evictLocalStoragePods as a
property (so that it doesn't need to be passed to each strategy function). It
also removes ListEvictablePodsOnNode and extends ListPodsOnANode with an optional
"filter" function parameter, so that for example IsEvictable can be passed to it
and achieve the same results as the function formerly known as ListEvictablePodsOnNode.
This approach also now allows strategies to more explicitly extend their criteria of what
IsEvictable considers an evictable pod (such as NodeAffinity, which checks that the pod
can fit on any other node).
2020-06-05 10:28:48 -04:00
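A sketch of the filter-parameter pattern this refactor describes; the function and type names below are stand-ins, not the actual descheduler API:

```go
package main

import (
	"fmt"
	"strings"
)

// listPodsOnNode returns pod names, optionally narrowed by a filter;
// a nil filter keeps every pod (stand-in for ListPodsOnANode).
func listPodsOnNode(pods []string, filter func(string) bool) []string {
	var out []string
	for _, p := range pods {
		if filter == nil || filter(p) {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	pods := []string{"web-1", "kube-proxy-abc", "web-2"}
	// Passing an "is evictable" predicate reproduces the behavior of
	// the former ListEvictablePodsOnNode.
	isEvictable := func(name string) bool { return !strings.HasPrefix(name, "kube-") }
	fmt.Println(listPodsOnNode(pods, isEvictable)) // [web-1 web-2]
}
```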
Jan Chaloupka
eb4c1bb355 Add myself to project reviewers
Based on https://github.com/kubernetes/community/blob/master/community-membership.md#requirements-1:

The following apply to the part of codebase for which one would be a reviewer in an OWNERS file (for repos using the bot).

> member for at least 3 months

For a couple of years now

> Primary reviewer for at least 5 PRs to the codebase

https://github.com/kubernetes-sigs/descheduler/pull/285
https://github.com/kubernetes-sigs/descheduler/pull/275
https://github.com/kubernetes-sigs/descheduler/pull/267
https://github.com/kubernetes-sigs/descheduler/pull/254
https://github.com/kubernetes-sigs/descheduler/pull/181

> Reviewed or merged at least 20 substantial PRs to the codebase

https://github.com/kubernetes-sigs/descheduler/pulls?q=is%3Apr+is%3Aclosed+assignee%3Aingvagabund

> Knowledgeable about the codebase

yes

> Sponsored by a subproject approver
> With no objections from other approvers
> Done through PR to update the OWNERS file

this PR

> May either self-nominate, be nominated by an approver in this subproject, or be nominated by a robot

self-nominating
2020-06-04 18:16:38 +02:00
Kubernetes Prow Robot
6dfa95cc87 Merge pull request #312 from lixiang233/fix_lowUtilization_golint
Pass golint check for pkg/descheduler/strategies/
2020-06-04 07:09:15 -07:00
lixiang
eb9d974a8b pass golint check for pkg/descheduler/strategies/ 2020-06-04 14:33:24 +08:00
lixiang
d41a1f4a56 Ignore evicting when no evictable pod found on node 2020-06-04 10:31:15 +08:00
Kubernetes Prow Robot
138ad556a3 Merge pull request #315 from damemi/update-travis-yml
Remove redundant tests from Travis CI
2020-06-03 11:44:11 -07:00
Mike Dame
37124e6e45 Remove redundant tests from Travis CI
These tests are now covered by k8s test-infra prow tests.
2020-06-03 14:11:08 -04:00
Kubernetes Prow Robot
bd412bf87f Merge pull request #300 from damemi/refactor-is-evictable
Add more verbose logging to IsEvictable checks
2020-05-29 10:03:16 -07:00
Kubernetes Prow Robot
6f9b31f568 Merge pull request #308 from damemi/fix-lint-for-ci
Download golangci-lint to _output/bin directory
2020-05-29 09:57:15 -07:00
Mike Dame
c858740c4f Run golangci-lint from GOPATH 2020-05-29 12:48:42 -04:00
Kubernetes Prow Robot
bfefe634a1 Merge pull request #307 from damemi/make-verify
Add parent make verify target
2020-05-29 08:01:16 -07:00
Mike Dame
7b7b9e1cd7 Add parent make verify target 2020-05-29 10:23:38 -04:00
Mike Dame
0a4b8b0a25 Add more verbose logging to IsEvictable checks 2020-05-29 10:20:14 -04:00
Kubernetes Prow Robot
f28183dcbe Merge pull request #306 from ingvagabund/make-Params-a-pointer-II
Add missing address of api.StrategyParameters
2020-05-29 05:53:16 -07:00
Jan Chaloupka
abdf79454f Add missing address of api.StrategyParameters
https://github.com/kubernetes-sigs/descheduler/pull/285 got merged before re-running the unit tests.
2020-05-29 13:34:57 +02:00
Kubernetes Prow Robot
46b570b71d Merge pull request #296 from ingvagabund/make-Params-a-pointer
Make DeschedulerStrategy.Params a pointer
2020-05-28 22:53:15 -07:00
Kubernetes Prow Robot
616a9b5f6b Merge pull request #285 from lixiang233/Ft_change_lowUtilization_break
Change break condition and thresholds validation for lowUtilization
2020-05-28 07:36:02 -07:00
Kubernetes Prow Robot
003a4cdc2b Merge pull request #302 from lixiang233/clean_up_strategy_enable_check
Clean up code in LowNodeUtilization
2020-05-28 07:16:02 -07:00
Kubernetes Prow Robot
54ea05d8bb Merge pull request #299 from damemi/node-affinity-logs
Standardize node affinity strategy logs
2020-05-28 01:52:02 -07:00
lixiang
d0eea0cabb Clean up code in LowNodeUtilization 2020-05-28 10:35:46 +08:00
Mike Dame
f0297dfe03 Standardize node affinity strategy logs 2020-05-27 16:52:16 -04:00
Kubernetes Prow Robot
ef1f36f8e4 Merge pull request #297 from ingvagabund/add-verify-gofmt
Add verify-gofmt make target
2020-05-27 07:02:40 -07:00
Jan Chaloupka
a733c95dcc Add verify-gofmt and verify-vendor make target
So e.g. CI can run 'make verify-gofmt' instead of ./hack/verify-gofmt in case the script location changes.
2020-05-27 15:08:20 +02:00
Jan Chaloupka
5a81a0661b Make DeschedulerStrategy.Params a pointer
To avoid empty params fields when serializing:
  RemoveDuplicates:
    enabled: true
    params: {}
  RemovePodsHavingTooManyRestarts:
    enabled: true
    params:
      podsHavingTooManyRestarts: {}
  RemovePodsViolatingInterPodAntiAffinity:
    enabled: true
    params: {}
  RemovePodsViolatingNodeAffinity:
    enabled: true
    params:
      nodeAffinityType:
      - requiredDuringSchedulingIgnoredDuringExecution
  RemovePodsViolatingNodeTaints:
    enabled: true
    params: {}
2020-05-27 10:31:31 +02:00
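A self-contained sketch of why the pointer matters for serialization (types abbreviated; assumes the usual `omitempty` tags rather than the project's exact definitions):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type StrategyParameters struct {
	NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
}

// With a non-pointer Params field, an unset value still serializes as
// params: {}; with a pointer plus omitempty, a nil Params disappears.
type Strategy struct {
	Enabled bool                `json:"enabled"`
	Params  *StrategyParameters `json:"params,omitempty"`
}

func main() {
	b, _ := json.Marshal(Strategy{Enabled: true}) // Params is nil
	fmt.Println(string(b))                        // {"enabled":true}
}
```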
Kubernetes Prow Robot
83e04960af Merge pull request #288 from KohlsTechnology/update-support-matrix
Update Compatibility Matrix in README
2020-05-26 06:37:12 -07:00
lixiang
2a8dc69cbb Stop condition and config validation change for lowUtilization
1. Set default CPU/Mem/Pods threshold percentages to 100
2. Stop evicting pods if any resource runs out
3. Add a thresholds verification method and limit resource percentages to [0, 100]
4. Update test cases and README
2020-05-25 19:03:37 +08:00
Kubernetes Prow Robot
5d82d08af3 Merge pull request #291 from KohlsTechnology/update-releasedocs
Add URL For Viewing Staging Registry
2020-05-22 09:55:11 -07:00
Sean Malloy
c265825166 Add URL For Viewing Staging Registry
This newly documented URL can be used to view the descheduler staging
registry in a web browser. This is easier to browse if the gcloud
command is not available.
2020-05-22 11:33:45 -05:00
Kubernetes Prow Robot
ed0126fb63 Merge pull request #290 from KohlsTechnology/bump-install-manifests
Update Install Manifests For Release v0.18.0
2020-05-22 07:08:38 -07:00
Sean Malloy
8c7267b379 Update Install Manifests For Release v0.18.0 2020-05-21 22:54:54 -05:00
Kubernetes Prow Robot
d85ce22975 Merge pull request #289 from KohlsTechnology/update-release-docs
Update Release Guide
2020-05-20 11:46:19 -07:00
Kubernetes Prow Robot
a9091a1e37 Merge pull request #287 from ingvagabund/evict-pod-mention-dry-run-if-set
Mention pod eviction is in dry mode when set
2020-05-20 11:44:19 -07:00
Sean Malloy
d3b0ac8e06 Update Release Guide
Starting with descheduler v0.18 a release branch will be created for
each descheduler minor release. The release guide has been updated with
the steps to create release branches.
2020-05-20 11:10:42 -05:00
Sean Malloy
435674fb44 Update Compatibility Matrix in README
The matrix has been updated with the soon-to-be-released v0.18
details. Also, clarified the descheduler and k8s version compatibility
requirements and recommendations.
2020-05-20 10:55:57 -05:00
Jan Chaloupka
eacbae72fd Mention pod eviction is in dry mode when set
Indicate whether a pod is only virtually evicted so there's no confusion
with the real eviction.
2020-05-20 10:40:58 +02:00
Kubernetes Prow Robot
34550d4b7c Merge pull request #275 from damemi/image-in-duplicates
Consider pod image in duplicates strategy
2020-05-15 06:51:38 -07:00
Mike Dame
c6ff87dbd6 Consider pod image in duplicates strategy
This increases the specificity of the RemoveDuplicates strategy by
removing pods which not only have the same owner but also
the same list of container images. This also adds a
parameter, `ExcludeOwnerKinds`, to the RemoveDuplicates strategy
which accepts a list of Kinds. If a pod has any of these Kinds as
an owner, that pod is not considered for eviction.
2020-05-15 09:37:09 -04:00
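A hypothetical policy snippet using the new parameter; the key casing and nesting below follow the document's other examples but may not match the API exactly:

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: true
    params:
      removeDuplicates:
        excludeOwnerKinds:
        - "ReplicaSet"
```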
Kubernetes Prow Robot
04efe65f90 Merge pull request #279 from KohlsTechnology/eviction-logs
Add Additional Details To Pod Eviction Log Messages
2020-05-14 07:20:22 -07:00
Sean Malloy
55afde6251 Remove line break in log message from PodLifeTime strategy 2020-05-14 00:36:52 -05:00
Sean Malloy
7039b6c8aa Refactor function EvictPod to only return an error
The EvictPod function previously returned a bool and an error. The
function now only returns an error. Callers can check for failure by
testing if the returned error is not nil. This aligns the EvictPod
function with idiomatic Go best practices.

Also, the function name has been changed from EvictPod to evictPod
because it is not used outside the evictions package.
2020-05-14 00:11:54 -05:00
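A minimal sketch of the calling convention after this refactor (the helper below is a stand-in for the unexported evictPod):

```go
package main

import (
	"errors"
	"fmt"
)

// evictPod now returns only an error; callers test err != nil for failure.
func evictPod(name string) error {
	if name == "" {
		return errors.New("pod name must not be empty")
	}
	return nil
}

func main() {
	if err := evictPod(""); err != nil {
		fmt.Println("eviction failed:", err)
	}
}
```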
Sean Malloy
cff984261e Log an error when EvictPod method returns a non-nil error
End users should be able to see the detailed error from the EvictPod
method when it fails. Updates all strategies to log the error. The
PodLifeTime strategy already logs this error.
2020-05-13 23:36:32 -05:00
Sean Malloy
4819ab9c69 Add namespace to pod eviction log messages
In multi-tenant environments it is useful to know which namespace a pod
was evicted from. Therefore log the namespace when evicting pods.

Also, do not log a nil error when successfully evicting pods.
2020-05-13 23:28:25 -05:00
Kubernetes Prow Robot
25336da708 Merge pull request #281 from KohlsTechnology/update-travis-ci
Update Travis CI build matrix with latest k8s point releases
2020-05-13 08:20:26 -07:00
Sean Malloy
4941f6a16b Update Travis CI build matrix with latest k8s point releases 2020-05-12 22:38:23 -05:00
Kubernetes Prow Robot
d7e93058d4 Merge pull request #280 from damemi/k8s-1.18
Update to k8s 1.18.2 dependencies
2020-05-12 13:40:21 -07:00
Mike Dame
c20a595370 Update travis.yml with new k8s version and updated kind version 2020-05-12 15:01:25 -04:00
Mike Dame
eec1104d6e React to 1.18 by adding contexts to client calls 2020-05-12 15:01:25 -04:00
Mike Dame
741b35edf5 Update to k8s 1.18.2 dependencies 2020-05-12 14:07:31 -04:00
Kubernetes Prow Robot
c01cfcf3b6 Merge pull request #274 from KohlsTechnology/pod-lifetime-strategy
Add New PodLifeTime Strategy
2020-05-08 07:29:42 -07:00
Sean Malloy
643cd472ef Add initial production use cases section to user guide 2020-05-08 09:02:24 -05:00
Sean Malloy
668d727fc2 Add VS Code File To .gitignore 2020-05-07 23:10:48 -05:00
Sean Malloy
423ee35846 Add New PodLifeTime Strategy
The new PodLifeTime descheduler strategy can be used to evict pods that
were created more than the configured number of seconds ago.

In the example below, pods created more than 24 hours ago will be evicted.
````
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
     enabled: true
     params:
        maxPodLifeTimeSeconds: 86400
````
2020-05-07 23:10:36 -05:00
Kubernetes Prow Robot
31c7855212 Merge pull request #278 from lixiang233/fix_readme
Fix readme
2020-05-07 12:19:43 -07:00
Kubernetes Prow Robot
211f3942b6 Merge pull request #276 from damemi/toomanyrestarts-pointer
Switch PodsHavingTooManyRestarts params to pointer
2020-05-07 12:17:42 -07:00
Kubernetes Prow Robot
beae282735 Merge pull request #277 from damemi/damemi-approvers
Add damemi to approvers
2020-05-07 12:15:42 -07:00
lixiang
635348efb9 Fix readme 2020-05-07 11:47:32 +08:00
Mike Dame
fa335c782f Switch PodsHavingTooManyRestarts params to pointer 2020-05-06 13:29:05 -04:00
Mike Dame
b019a58525 Add damemi to approvers 2020-05-06 11:00:58 -04:00
Kubernetes Prow Robot
78eef6c343 Merge pull request #267 from ingvagabund/drop-DeschedulerServer-from-strategies
Drop descheduler server from strategies
2020-04-28 08:34:06 -07:00
Jan Chaloupka
cbcefb5d2f Remove options.DeschedulerServer from all strategies 2020-04-28 16:13:33 +02:00
Kubernetes Prow Robot
149085fb57 Merge pull request #240 from ingvagabund/turn-strategy-params-NodeResourceUtilizationThresholds-field-into-pointer
Turn StrategyParameters.NodeResourceUtilizationThresholds field into a pointer
2020-04-28 06:48:07 -07:00
Jan Chaloupka
991eddb691 Turn StrategyParameters.NodeResourceUtilizationThresholds field into a pointer
The field is intended to be omitted (omitempty) when not set. Without a pointer, the
serialized strategy looks like:

```yaml
strategies:
  LowNodeUtilization:
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        numberOfNodes: 1
        targetThresholds:
          cpu: 50
          memory: 50
          pods: 20
        thresholds:
          cpu: 50
          memory: 50
          pods: 20
  RemoveDuplicates:
    enabled: true
    params:
      nodeResourceUtilizationThresholds: {}
  RemovePodsViolatingInterPodAntiAffinity:
    enabled: true
    params:
      nodeResourceUtilizationThresholds: {}
  RemovePodsViolatingNodeAffinity:
    enabled: true
    params:
      nodeAffinityType:
      - requiredDuringSchedulingIgnoredDuringExecution
      nodeResourceUtilizationThresholds: {}
  RemovePodsViolatingNodeTaints:
    enabled: true
    params:
      nodeResourceUtilizationThresholds: {}
```

It's preferred to have the following instead:
```yaml
strategies:
  LowNodeUtilization:
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        numberOfNodes: 1
        targetThresholds:
          cpu: 50
          memory: 50
          pods: 20
        thresholds:
          cpu: 50
          memory: 50
          pods: 20
  RemoveDuplicates:
    enabled: true
  RemovePodsViolatingInterPodAntiAffinity:
    enabled: true
  RemovePodsViolatingNodeAffinity:
    enabled: true
    params:
      nodeAffinityType:
      - requiredDuringSchedulingIgnoredDuringExecution
  RemovePodsViolatingNodeTaints:
    enabled: true
```
2020-04-28 15:20:30 +02:00
Kubernetes Prow Robot
91de471376 Merge pull request #254 from damemi/toomanyrestarts
Add RemoveTooManyRestarts policy
2020-04-27 12:48:05 -07:00
Mike Dame
c2d7e22749 make gen && go mod vendor 2020-04-24 10:48:28 -04:00
Mike Dame
e7c42794a0 Add RemovePodsHavingTooManyRestarts strategy 2020-04-24 10:48:28 -04:00
Kubernetes Prow Robot
3a8dfc07ed Merge pull request #266 from ingvagabund/pod-evictor
Move maximum-pods-per-nodes-evicted logic under a single invocation
2020-04-23 16:14:07 -07:00
Jan Chaloupka
077b7f6505 Drop check for MaxNoOfPodsToEvictPerNode and invoke EvictPod wrapper instead 2020-04-22 10:33:22 +02:00
Jan Chaloupka
240fa93bc5 Move maximum-pods-per-nodes-evicted logic under a single invocation
Each strategy implements a check for whether the maximum number of pods per
node has already been evicted. The check duplicates code that can be put
under a single invocation. This also reduces the number of arguments passed
to each strategy, since the EvictPod call can encapsulate both the check
and the invocation of the pod eviction itself.
2020-04-22 10:32:16 +02:00
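A sketch of the consolidation described above; the type and method names are assumptions, but the real PodEvictor tracks per-node counts in a similar way:

```go
package main

import "fmt"

// podEvictor encapsulates the per-node eviction cap so strategies no
// longer duplicate the check before every eviction.
type podEvictor struct {
	maxPerNode int
	evicted    map[string]int
}

// evictPod refuses the eviction once the node's cap is reached and
// reports whether the pod was actually evicted.
func (e *podEvictor) evictPod(node, pod string) bool {
	if e.evicted[node] >= e.maxPerNode {
		return false
	}
	e.evicted[node]++
	fmt.Printf("evicted %s from %s\n", pod, node)
	return true
}

func main() {
	e := &podEvictor{maxPerNode: 1, evicted: map[string]int{}}
	e.evictPod("node-1", "pod-a") // evicted
	e.evictPod("node-1", "pod-b") // skipped: cap reached
}
```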
Kubernetes Prow Robot
6c7f846917 Merge pull request #265 from ingvagabund/tolerations-tains-code-cleanup
Tolerations taints code cleanup
2020-04-21 14:27:52 -07:00
Jan Chaloupka
6db7c3b92c Call utils.TolerationsTolerateTaintsWithFilter directly, not through checkPodsSatisfyTolerations 2020-04-21 22:34:14 +02:00
Kubernetes Prow Robot
030267107a Merge pull request #262 from ingvagabund/order-pods-by-priority-only-over-over-utilized-nodes
Order pods by priority only over over utilized nodes
2020-04-19 14:05:38 -07:00
Jan Chaloupka
1c300a9881 Drop getNoScheduleTaints and allTaintsTolerated in favor of utils.TolerationsTolerateTaintsWithFilter 2020-04-19 22:18:49 +02:00
Jan Chaloupka
0e9b33b822 allTaintsTolerated: remove for iteration through tolerations which is already implemented in utils.TolerationsTolerateTaint 2020-04-19 22:07:34 +02:00
Jan Chaloupka
36e3d1e703 Drop local implementation of toleratesTaint in favor of k8s.io/api/core/v1.Toleration.TolerateTaint
Functionally identical implementation of toleratesTaint is already provided in k8s.io/api
2020-04-19 21:58:41 +02:00
Jan Chaloupka
f53264b613 lownodeutilization: evict best-effort pods only
Unit tests refactored for node utilization and pod classification
2020-04-17 11:32:34 +02:00
Jan Chaloupka
414554ae5e lownodeutilization: make unit tests with/without priority table driven 2020-04-17 11:32:28 +02:00
Jan Chaloupka
150f945592 lownodeutilization: classify pods of over-utilized nodes only
Only over-utilized nodes need classification of pods into categories.
Skipping pod categorization thus saves computation time when
over-utilized nodes make up less than half of all nodes.
2020-04-17 11:32:22 +02:00
Kubernetes Prow Robot
9a84afece1 Merge pull request #264 from ingvagabund/remove-pods-violating-node-affinity
readme: RemovePodsViolatingNodeAffinity: reword description of the strategy
2020-04-16 20:59:08 -07:00
Jan Chaloupka
e0c101c5ae readme: RemovePodsViolatingNodeAffinity: reword description of the strategy
Compared to https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity
the strategy effectively implements the requiredDuringSchedulingRequiredDuringExecution node affinity type
on behalf of kubelets. The only addition over the kubelet is that the strategy checks whether at least
one other node is capable of respecting the node affinity rules.

When the requiredDuringSchedulingRequiredDuringExecution node affinity type is implemented in the kubelet,
the strategy will likely either be removed or re-implemented. Stressing the relation with
requiredDuringSchedulingRequiredDuringExecution helps consumers of the descheduler
keep in mind that the kubelet will eventually take over the strategy once implemented.
2020-04-15 11:54:37 +02:00
Kubernetes Prow Robot
e3a562aea0 Merge pull request #260 from CriaHu/hyq_descheduler
fix broken link :
2020-04-07 18:01:44 -07:00
Kubernetes Prow Robot
4966e8ee08 Merge pull request #256 from damemi/fix-podfitsanynode
Fix early return from PodFitsAnyNode check
2020-04-07 08:01:43 -07:00
Kubernetes Prow Robot
d3542d5892 Merge pull request #258 from damemi/verify-deps
Add verify-vendor scripts to CI
2020-04-07 07:53:43 -07:00
Mike Dame
62d04b0fc7 go mod tidy && go mod vendor 2020-04-07 10:34:47 -04:00
Mike Dame
6ecbc85448 Add hack/{update,verify}-vendor.sh scripts 2020-04-07 10:34:47 -04:00
Cria Hu
b9b1eae6fb fix broken link :
https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md
2020-04-06 09:28:09 +08:00
Mike Dame
e95e42930d Remove early return false from PodFitsAnyNode 2020-04-02 11:53:29 -04:00
Kubernetes Prow Robot
3cabb69014 Merge pull request #255 from KohlsTechnology/go-1.13.9
Update to Go 1.13.9
2020-03-31 20:47:27 -07:00
Sean Malloy
ad8f90f177 Update to Go 1.13.9
Kubernetes releases 1.16 through 1.19 have been updated
to Go 1.13.9, so it's time to update the descheduler too.
2020-03-31 22:30:52 -05:00
Kubernetes Prow Robot
8a62cf1699 Merge pull request #246 from KohlsTechnology/go-1.13.8
Update to Go 1.13.8
2020-03-09 11:17:36 -07:00
Sean Malloy
a6b54dae99 Update to Go 1.13.8
Kubernetes releases 1.15 through 1.19 have been updated
to Go 1.13.8, so it's time to update the descheduler too.

https://github.com/kubernetes/release/issues/1134
2020-03-08 23:12:53 -05:00
Kubernetes Prow Robot
112684bcb9 Merge pull request #181 from jw-s/feature/respect-tolerations
Respect Node taints with tolerations (if exist)
2020-03-04 10:45:48 -08:00
Kubernetes Prow Robot
682e07c3cd Merge pull request #249 from ingvagabund/list-node-through-informer-in-every-iteration
List nodes through informer in every iteration
2020-03-04 10:43:48 -08:00
Jan Chaloupka
9593ce16d9 List nodes through informer in every iteration
Also, refactor the code a bit so it can be tested without checking for eviction support.
2020-03-04 16:26:13 +01:00
Kubernetes Prow Robot
2545c8b031 Merge pull request #244 from KohlsTechnology/docs-revamp
Documentation Refresh
2020-03-04 07:23:47 -08:00
Sean Malloy
c9793e7029 Clean README based on feedback from review 2020-03-03 22:07:20 -06:00
Joel Whittaker-Smith
4757132452 check for taints on low nodes and honor them 2020-03-02 14:43:11 +01:00
Sean Malloy
561b3b67b3 Clean README based on feedback from review 2020-02-28 22:38:28 -06:00
Sean Malloy
566f33e6ad Initial User and Contributor Guide Draft 2020-02-26 01:29:57 -06:00
Sean Malloy
83a75bac80 Create Structure and Links For User and Contributor Guides 2020-02-26 01:30:08 -06:00
Sean Malloy
d8772f5685 Clean up README
* Fixed small typos
* Update compatibility matrix
2020-02-26 01:29:57 -06:00
Kubernetes Prow Robot
df510187d6 Merge pull request #233 from KohlsTechnology/update-release-docs
Add notes section to release documentation
2020-02-21 11:12:12 -08:00
Kubernetes Prow Robot
137b9b72e7 Merge pull request #242 from KohlsTechnology/fix-gcr-registry
Fix GCR Image Location In Job and CronJob YAML
2020-02-20 09:41:01 -08:00
Sean Malloy
09b4979673 Fix GCR Image Location In Job and CronJob YAML
The correct location for the official descheduler container image is
us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:TAG_NAME.
2020-02-20 00:30:16 -06:00
Kubernetes Prow Robot
eff8185d7c Merge pull request #239 from aveshagarwal/master-fix-gen
Fix email address in docker files.
2020-02-12 07:04:52 -08:00
Avesh Agarwal
83ee94dd08 Fix email address in docker files. 2020-02-12 09:31:24 -05:00
Sean Malloy
aae52ac2ee Document gcloud command used to get container image details
The "gcloud container images describe" command is used to get the SHA256
hash of an image in the staging registry. The SHA256 hash is required
for the image promotion process.

https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter
2020-02-11 22:46:27 -06:00
Sean Malloy
a7c4295c58 Add notes section to release documentation
Add a link to the test grid dashboard for automated container image builds,
and document useful gcloud commands for working with the staging registry.

This documentation is useful for project maintainers that are creating
releases and publishing container images. The chance of remembering
these commands and links is slim to none. Therefore documenting this
information will be very useful.
2020-02-11 22:41:35 -06:00
2106 changed files with 762 additions and 815137 deletions

.gitignore

@@ -1 +0,0 @@
_output/

.golangci.yml

@@ -1,15 +0,0 @@
run:
  deadline: 2m
linters:
  disable-all: true
  enable:
    - gofmt
    - gosimple
    - gocyclo
    - misspell
    - govet
linters-settings:
  goimports:
    local-prefixes: sigs.k8s.io/descheduler

.travis.yml

@@ -1,31 +0,0 @@
sudo: false
language: go
go:
  - 1.13.x
env:
  - K8S_VERSION=v1.17.0
  - K8S_VERSION=v1.16.4
  - K8S_VERSION=v1.15.7
services:
  - docker
before_script:
  - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
  - wget https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-linux-amd64
  - chmod +x kind-linux-amd64
  - mv kind-linux-amd64 kind
  - export PATH=$PATH:$PWD
  - kind create cluster --image kindest/node:${K8S_VERSION} --config=$TRAVIS_BUILD_DIR/hack/kind_config.yaml
  - export KUBECONFIG="$(kind get kubeconfig-path)"
  - docker pull kubernetes/pause
  - kind load docker-image kubernetes/pause
  - kind get kubeconfig > /tmp/admin.conf
script:
  - mkdir -p ~/gopath/src/sigs.k8s.io/
  - mv ~/gopath/src/github.com/kubernetes-sigs/descheduler ~/gopath/src/sigs.k8s.io/.
  - hack/verify-gofmt.sh
  - make lint
  - make build
  - make test-unit
  - make test-e2e

CONTRIBUTING.md

@@ -1,23 +0,0 @@
# Contributing Guidelines
Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
## Getting Started
We have full documentation on how to get started contributing here:
- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
## Mentorship
- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
## Contact Information
- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)

Dockerfile

@@ -1,26 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.13.6
WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
RUN make
FROM scratch
MAINTAINER Avesh Agarwal <avagarwa@redhat.com>
COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler
CMD ["/bin/descheduler", "--help"]

Dockerfile.dev

@@ -1,20 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM scratch
MAINTAINER Avesh Agarwal <avagarwa@redhat.com>
COPY _output/bin/descheduler /bin/descheduler
CMD ["/bin/descheduler", "--help"]

LICENSE

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Makefile

@@ -1,81 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

.PHONY: test

# VERSION is currently based on the last commit
VERSION?=$(shell git describe --tags)
COMMIT=$(shell git rev-parse HEAD)
BUILD=$(shell date +%FT%T%z)
LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app

LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"

GOLANGCI_VERSION := v1.15.0
HAS_GOLANGCI := $(shell which golangci-lint)

# REGISTRY is the container registry to push
# into. The default is to push to the staging
# registry, not production.
REGISTRY?=gcr.io/k8s-staging-descheduler

# IMAGE is the image name of descheduler
IMAGE:=descheduler:$(VERSION)

# IMAGE_GCLOUD is the image name of descheduler in the remote registry
IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)

# TODO: upload binaries to GCS bucket
#
# In the future binaries can be uploaded to
# GCS bucket gs://k8s-staging-descheduler.

all: build

build:
	CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler

dev-image: build
	docker build -f Dockerfile.dev -t $(IMAGE) .

image:
	docker build -t $(IMAGE) .

push-container-to-gcloud: image
	gcloud auth configure-docker
	docker tag $(IMAGE) $(IMAGE_GCLOUD)
	docker push $(IMAGE_GCLOUD)

push: push-container-to-gcloud

clean:
	rm -rf _output

test-unit:
	./test/run-unit-tests.sh

test-e2e:
	./test/run-e2e-tests.sh

gen:
	./hack/update-generated-conversions.sh
	./hack/update-generated-deep-copies.sh
	./hack/update-generated-defaulters.sh
	# undo go mod changes caused by above
	go mod tidy

lint:
ifndef HAS_GOLANGCI
	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin ${GOLANGCI_VERSION}
endif
	golangci-lint run

OWNERS

@@ -1,10 +0,0 @@
approvers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
reviewers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
- damemi
- seanmalloy

README.md

@@ -1,237 +0,0 @@
[![Build Status](https://travis-ci.org/kubernetes-sigs/descheduler.svg?branch=master)](https://travis-ci.org/kubernetes-sigs/descheduler)
[![Go Report Card](https://goreportcard.com/badge/kubernetes-sigs/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
# Descheduler for Kubernetes
## Introduction
Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
pod can or cannot be scheduled, are guided by its configurable policy, which comprises a set of
rules called predicates and priorities. The scheduler's decisions are influenced by its view of
the Kubernetes cluster at the point in time when a new pod first appears for scheduling.
As Kubernetes clusters are very dynamic and their state changes over time, it may be desirable
to move already running pods to other nodes for various reasons:
* Some nodes are under- or over-utilized.
* The original scheduling decision no longer holds true, as taints or labels have been added to
or removed from nodes, or pod/node affinity requirements are no longer satisfied.
* Some nodes failed and their pods moved to other nodes.
* New nodes are added to clusters.
Consequently, there might be several pods scheduled on less desirable nodes in a cluster. The
descheduler, based on its policy, finds pods that can be moved and evicts them. Please
note that, in the current implementation, the descheduler does not schedule replacements for
evicted pods but relies on the default scheduler for that.
## Build and Run
- Check out the repo into your $GOPATH directory under src/sigs.k8s.io/descheduler
Build descheduler:
```sh
$ make
```
and run descheduler:
```sh
$ ./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file>
```
If you want more information about what the descheduler is doing, add `-v 1` to the command line.
For more information about available options run:
```
$ ./_output/bin/descheduler --help
```
## Running Descheduler as a Job or CronJob
The descheduler can be run as a job or cron job inside of a pod. This has the advantage of
being able to run multiple times without user intervention.
The descheduler pod is run as a critical pod to avoid being evicted by itself,
or by the kubelet due to an eviction event. Since critical pods are created in the
`kube-system` namespace, the descheduler job and its pod will also be created
in the `kube-system` namespace.
### Setup RBAC
To grant the permissions necessary for the descheduler to work in a pod:
```
$ kubectl create -f kubernetes/rbac.yaml
```
### Create a configmap to store descheduler policy
```
$ kubectl create -f kubernetes/configmap.yaml
```
### Create a Job or CronJob
As a Job.
```
$ kubectl create -f kubernetes/job.yaml
```
Or as a CronJob.
```
$ kubectl create -f kubernetes/cronjob.yaml
```
## Policy and Strategies
Descheduler's policy is configurable and includes strategies to be enabled or disabled.
Five strategies, `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`, `RemovePodsViolatingNodeAffinity`, and `RemovePodsViolatingNodeTaints`, are currently implemented.
As part of the policy, the parameters associated with the strategies can be configured too.
By default, all strategies are enabled.
### RemoveDuplicates
This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
ReplicationController (RC), Deployment, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This issue can happen
if some nodes went down for whatever reason and their pods were moved to other nodes, leading to
more than one pod associated with an RS or RC, for example, running on the same node. Once the failed
nodes are ready again, this strategy can be enabled to evict those duplicate pods. Currently, there are
no parameters associated with this strategy. To disable this strategy, the policy should look like:
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: false
```
### LowNodeUtilization
This strategy finds nodes that are underutilized and evicts pods, if possible, from other nodes
in the hope that the evicted pods will be recreated and scheduled on these underutilized nodes. The
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.
The under utilization of nodes is determined by a configurable threshold, `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, and number of pods in terms of percentage. If a node's
usage is below threshold for all (cpu, memory, and number of pods), the node is considered underutilized.
Currently, pods' request resource requirements are considered for computing node resource utilization.
There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
from which pods could be evicted. Any node between `thresholds` and `targetThresholds` is
considered appropriately utilized and is not considered for eviction. The threshold `targetThresholds`
can also be configured for cpu, memory, and number of pods, in terms of percentage.
These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements.
An example of the policy for this strategy would look like:
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu" : 50
          "memory": 50
          "pods": 50
```
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
It can be configured so that the strategy activates only when the number of underutilized nodes
is above the configured value. This can be helpful in large clusters where a few nodes may become
underutilized frequently or for short periods of time. By default, `numberOfNodes` is set to zero.
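As a sketch, assuming `numberOfNodes` sits under `nodeResourceUtilizationThresholds` next to the thresholds, a policy using it would look like:
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu": 20
        targetThresholds:
          "cpu": 50
        # only run the strategy when at least 3 nodes are underutilized
        numberOfNodes: 3
```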
### RemovePodsViolatingInterPodAntiAffinity
This strategy ensures that pods violating inter-pod anti-affinity are removed from nodes. For example, if podA runs on a node together with podB and podC, and podB and podC have anti-affinity rules prohibiting them from running on the same node as podA, then podA is evicted from the node so that podB and podC can run. This situation can arise when the anti-affinity rules for pods B and C are created while they are already running on the node. Currently, there are no parameters associated with this strategy. To disable it, the policy should look like:
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: false
```
### RemovePodsViolatingNodeAffinity
This strategy ensures that pods violating node affinity are removed from nodes. For example, suppose podA was scheduled on nodeA, which satisfied the node affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time of scheduling, but over time nodeA stops satisfying the rule. If another node, nodeB, is available that satisfies the rule, podA is evicted from nodeA. The policy file should look like this:
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
```
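For illustration, a pod whose node affinity this strategy evaluates might carry a rule like the one below; the `disktype=ssd` label and the pause image are hypothetical examples:
```
apiVersion: v1
kind: Pod
metadata:
  name: pod-a
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype        # hypothetical node label
            operator: In
            values:
            - ssd
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.1  # placeholder image
```
If the pod's node loses the `disktype=ssd` label while another labeled node is available, the strategy evicts the pod so the scheduler can place it on a node that satisfies the rule.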
### RemovePodsViolatingNodeTaints
This strategy ensures that pods violating `NoSchedule` taints on nodes are removed. For example, suppose a pod, podA, has a toleration for the taint `key=value:NoSchedule` and is running on the tainted node. If the node's taint is subsequently updated or removed, the taint is no longer tolerated by the pod, and the pod is evicted. The policy file should look like:
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
```
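As a hypothetical illustration, a pod tolerating the taint described above could be defined as follows; if the node's taint later changes (say, to `key=newvalue:NoSchedule`), this toleration no longer matches and the pod becomes a candidate for eviction:
```
apiVersion: v1
kind: Pod
metadata:
  name: pod-a
spec:
  tolerations:
  - key: "key"                   # matches the taint key=value:NoSchedule
    operator: "Equal"
    value: "value"
    effect: "NoSchedule"
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.1  # placeholder image
```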
## Pod Evictions
When the descheduler decides to evict pods from a node, it follows these general rules:
* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with `priorityClassName` set to `system-cluster-critical` or `system-node-critical`) are never evicted.
* Pods (static, mirrored, or standalone pods) not part of an RC, RS, Deployment, or Job are
never evicted because these pods won't be recreated.
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted.
* Best effort pods are evicted before Burstable and Guaranteed pods.
* Pods of any type carrying the annotation `descheduler.alpha.kubernetes.io/evict` can be evicted. This
annotation overrides the checks that would otherwise prevent eviction, so the user can select which pods are evicted.
Users should know how, and whether, such pods will be recreated.
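A sketch of opting a pod into eviction via the annotation; the pod itself is hypothetical, and the assumption here is that only the presence of the annotation key matters, not its value:
```
apiVersion: v1
kind: Pod
metadata:
  name: evictable-pod
  annotations:
    descheduler.alpha.kubernetes.io/evict: ""  # presence of the key opts the pod in
spec:
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.1                # placeholder image
```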
### Pod Disruption Budget (PDB)
Pods protected by a Pod Disruption Budget (PDB) are not evicted if descheduling would violate
the PDB. The pods are evicted through the eviction subresource, which honors PDBs.
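For example, a PDB sketch like the following (the name and label are illustrative) would stop the descheduler from evicting a pod of `my-app` whenever the eviction would leave fewer than two replicas available:
```
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: my-app-pdb
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: my-app   # hypothetical label
```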
## Roadmap
This roadmap is not in any particular order.
* Consideration of pod affinity
* Strategy to consider pod lifetime
* Strategy to consider number of pending pods
* Integration with cluster autoscaler
* Integration with metrics providers for obtaining real load metrics
* Consideration of the Kubernetes scheduler's predicates
## Compatibility matrix
Descheduler | supported Kubernetes version
-------------|-----------------------------
0.4+ | 1.9+
0.1-0.3 | 1.7-1.8
## Community, discussion, contribution, and support
Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
You can reach the maintainers of this project at:
- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)
### Code of conduct
Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).


@@ -1,15 +0,0 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Team to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
aveshagarwal
k82cn
ravisantoshgudimetla


@@ -1,24 +0,0 @@
# See https://cloud.google.com/cloud-build/docs/build-config
# this must be specified in seconds. If omitted, defaults to 600s (10 mins)
timeout: 1200s
# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
# or any new substitutions added in the future.
options:
  substitution_option: ALLOW_LOOSE
steps:
  - name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4'
    entrypoint: make
    env:
      - DOCKER_CLI_EXPERIMENTAL=enabled
      - VERSION=$_GIT_TAG
      - BASE_REF=$_PULL_BASE_REF
    args:
      - push
substitutions:
  # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
  # can be used as a substitution
  _GIT_TAG: '12345'
  # _PULL_BASE_REF will contain the ref that was pushed to trigger this build -
  # a branch like 'master' or 'release-0.2', or a tag like 'v0.2'.
  _PULL_BASE_REF: 'master'


@@ -1,61 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package options provides the descheduler flags
package options
import (
    clientset "k8s.io/client-go/kubernetes"

    // install the componentconfig api so we get its defaulting and conversion functions
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
    deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"

    "github.com/spf13/pflag"
)

// DeschedulerServer configuration
type DeschedulerServer struct {
    componentconfig.DeschedulerConfiguration

    Client clientset.Interface
}

// NewDeschedulerServer creates a new DeschedulerServer with default parameters
func NewDeschedulerServer() *DeschedulerServer {
    versioned := v1alpha1.DeschedulerConfiguration{}
    deschedulerscheme.Scheme.Default(&versioned)
    cfg := componentconfig.DeschedulerConfiguration{}
    deschedulerscheme.Scheme.Convert(&versioned, &cfg, nil)
    s := DeschedulerServer{
        DeschedulerConfiguration: cfg,
    }
    return &s
}

// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
    fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
    fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
    fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
    fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "Execute descheduler in dry run mode.")
    // node-selector query causes descheduler to run only on nodes that match the node labels in the query
    fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2)")
    // max-pods-to-evict-per-node limits the maximum number of pods evicted per node by the descheduler.
    fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
    // evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
    fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "Enables evicting pods using local storage by descheduler")
}


@@ -1,61 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app implements a Server object for running the descheduler.
package app
import (
    "flag"
    "io"

    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
    "sigs.k8s.io/descheduler/pkg/descheduler"

    "github.com/spf13/cobra"

    aflag "k8s.io/component-base/cli/flag"
    "k8s.io/component-base/logs"
    "k8s.io/klog"
)

// NewDeschedulerCommand creates a *cobra.Command object with default parameters
func NewDeschedulerCommand(out io.Writer) *cobra.Command {
    s := options.NewDeschedulerServer()
    cmd := &cobra.Command{
        Use:   "descheduler",
        Short: "descheduler",
        Long:  `The descheduler evicts pods which may be bound to less desired nodes`,
        Run: func(cmd *cobra.Command, args []string) {
            logs.InitLogs()
            defer logs.FlushLogs()
            err := Run(s)
            if err != nil {
                klog.Errorf("%v", err)
            }
        },
    }
    cmd.SetOutput(out)
    flags := cmd.Flags()
    flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
    flags.AddGoFlagSet(flag.CommandLine)
    s.AddFlags(flags)
    return cmd
}

// Run starts the descheduler with the given server configuration.
func Run(rs *options.DeschedulerServer) error {
    return descheduler.Run(rs)
}


@@ -1,87 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
    "fmt"
    "runtime"
    "strings"

    "github.com/spf13/cobra"
)

var (
    // gitCommit is a constant representing the source version that
    // generated this build. It should be set during build via -ldflags.
    gitCommit string
    // version is a constant representing the version tag that
    // generated this build. It should be set during build via -ldflags.
    version string
    // buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ').
    // It should be set during build via -ldflags.
    buildDate string
)

// Info holds the information related to descheduler app version.
type Info struct {
    Major      string `json:"major"`
    Minor      string `json:"minor"`
    GitCommit  string `json:"gitCommit"`
    GitVersion string `json:"gitVersion"`
    BuildDate  string `json:"buildDate"`
    GoVersion  string `json:"goVersion"`
    Compiler   string `json:"compiler"`
    Platform   string `json:"platform"`
}

// Get returns the overall codebase version. It's for detecting
// what code a binary was built from.
func Get() Info {
    majorVersion, minorVersion := splitVersion(version)
    return Info{
        Major:      majorVersion,
        Minor:      minorVersion,
        GitCommit:  gitCommit,
        GitVersion: version,
        BuildDate:  buildDate,
        GoVersion:  runtime.Version(),
        Compiler:   runtime.Compiler,
        Platform:   fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
    }
}

// NewVersionCommand returns a cobra command that prints the descheduler version.
func NewVersionCommand() *cobra.Command {
    var versionCmd = &cobra.Command{
        Use:   "version",
        Short: "Version of descheduler",
        Long:  `Prints the version of descheduler.`,
        Run: func(cmd *cobra.Command, args []string) {
            fmt.Printf("Descheduler version %+v\n", Get())
        },
    }
    return versionCmd
}

// splitVersion splits the git version to generate the major and minor versions needed.
func splitVersion(version string) (string, string) {
    if version == "" {
        return "", ""
    }
    // A sample version is of the form v0.1.0-7-ge884046, so split at the first '.' and
    // return "0" and "1+" ("+" appended to follow the semver convention) as the major
    // and minor versions.
    return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
}


@@ -1,36 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
    "flag"
    "fmt"
    "os"

    "sigs.k8s.io/descheduler/cmd/descheduler/app"
)

func main() {
    out := os.Stdout
    cmd := app.NewDeschedulerCommand(out)
    cmd.AddCommand(app.NewVersionCommand())
    flag.CommandLine.Parse([]string{})

    if err := cmd.Execute(); err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
}


@@ -1,3 +0,0 @@
# Kubernetes Community Code of Conduct
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)


@@ -1,21 +0,0 @@
# Release process
## Semi-automatic
1. Make sure your repo is clean by git's standards
2. Tag the repository and push the tag `VERSION=v0.10.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
3. Publish a draft release using the tag you just created
4. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
5. Publish release
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
## Manual
1. Make sure your repo is clean by git's standards
2. Tag the repository and push the tag `VERSION=v0.10.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
3. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
4. Build and push the container image to the staging registry `VERSION=$VERSION make push`
5. Publish a draft release using the tag you just created
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
7. Publish release
8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release


@@ -1,8 +0,0 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"


@@ -1,19 +0,0 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: true
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: true
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50

go.mod

@@ -1,14 +0,0 @@
module sigs.k8s.io/descheduler

go 1.13

require (
    github.com/spf13/cobra v0.0.5
    github.com/spf13/pflag v1.0.5
    k8s.io/api v0.17.0
    k8s.io/apimachinery v0.17.3-beta.0
    k8s.io/apiserver v0.17.0
    k8s.io/client-go v0.17.0
    k8s.io/component-base v0.17.0
    k8s.io/klog v1.0.0
)

go.sum

@@ -1,355 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM=
k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apimachinery v0.17.3-beta.0 h1:DeN0royOQ5+j3ytFWnDkxGiF5r9T7m6E9Ukzjg4vVHc=
k8s.io/apimachinery v0.17.3-beta.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apiserver v0.17.0 h1:XhUix+FKFDcBygWkQNp7wKKvZL030QUlH1o8vFeSgZA=
k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=
k8s.io/client-go v0.17.0 h1:8QOGvUGdqDMFrm9sD6IUFl256BcffynGoe80sxgTEDg=
k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
k8s.io/component-base v0.17.0 h1:BnDFcmBDq+RPpxXjmuYnZXb59XNN9CaFrX8ba9+3xrA=
k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=


@@ -1,16 +0,0 @@
/*
Copyright YEAR The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/


@@ -1,95 +0,0 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
echo "Make sure that uuid package is installed"
master_uuid=$(uuid)
node1_uuid=$(uuid)
node2_uuid=$(uuid)
kube_apiserver_port=6443
kube_version=1.13.1
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/../../
E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
create_cluster() {
    echo "#################### Creating instances ##########################"
    gcloud compute instances create descheduler-$master_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
    # Keeping the --zone here so as to make sure that e2e's can run locally.
    echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh
    gcloud compute instances create descheduler-$node1_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
    echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
    gcloud compute instances create descheduler-$node2_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
    echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
    # Delete the firewall port created for master.
    echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
    chmod 755 $E2E_GCE_HOME/delete_cluster.sh
}

generate_kubeadm_instance_files() {
    # TODO: Check if they have come up. awk $6 contains the state (RUNNING or not).
    master_private_ip=$(gcloud compute instances list | grep $master_uuid | awk '{print $4}')
    node1_public_ip=$(gcloud compute instances list | grep $node1_uuid | awk '{print $5}')
    node2_public_ip=$(gcloud compute instances list | grep $node2_uuid | awk '{print $5}')
echo "kubeadm init --kubernetes-version=${kube_version} --apiserver-advertise-address=${master_private_ip}" --ignore-preflight-errors=all --pod-network-cidr=10.96.0.0/12 > $E2E_GCE_HOME/kubeadm_install.sh
}

transfer_install_files() {
    gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
    gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
    gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
    gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
}

install_kube() {
    # Docker installation.
    gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
    gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
    gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
    # kubeadm installation.
    # 1. Transfer files to master, nodes.
    transfer_install_files
    # 2. Install kubeadm.
    # TODO: Add rm /tmp/kubeadm_install.sh
    # Open port for kube API server
    gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
    gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
    kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b | grep 'kubeadm join')
    # Copy the kubeconfig file onto /tmp for e2e tests.
    gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b
    gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b
    # Postinstall on master, need to add a network plugin for kube-dns to come to running state.
    gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b
    echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh
    # Copy kubeadm_join to every node.
    # TODO: Put these in a loop, so that extension becomes possible.
    gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
    gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
    gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
    gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
    gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
    gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
}
create_cluster
generate_kubeadm_instance_files
install_kube
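# Sketch of the loop the TODO in install_kube asks for. join_node is a
# hypothetical helper; its body is the same per-node sequence used above.
join_node() {
  local uuid="$1"
  gcloud compute ssh descheduler-$uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
  gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$uuid:/tmp --zone=us-east1-b
  gcloud compute ssh descheduler-$uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
}
# Example: for uuid in $node1_uuid $node2_uuid; do join_node $uuid; done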


@@ -1,8 +0,0 @@
#!/usr/bin/env bash
set -e
gcloud auth activate-service-account --key-file "${GCE_SA_CREDS}"
gcloud config set project $GCE_PROJECT_ID
gcloud config set compute/zone $GCE_ZONE


@@ -1,9 +0,0 @@
#!/usr/bin/env bash
set -e
wget https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-176.0.0-linux-x86_64.tar.gz
tar -xvzf google-cloud-sdk-176.0.0-linux-x86_64.tar.gz
./google-cloud-sdk/install.sh -q
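# Optional follow-up (assumes the default install location): make gcloud
# available in the current shell by sourcing the SDK's generated path helper.
source ./google-cloud-sdk/path.bash.inc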


@@ -1,11 +0,0 @@
apt-get update
apt-get install -y docker.io
apt-get update && apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet kubeadm kubectl
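# Optional hardening, not in the original file: pin the packages so unattended
# upgrades cannot move the cluster to untested versions.
apt-mark hold kubelet kubeadm kubectl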
exit 0


@@ -1,6 +0,0 @@
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
nodes:
- role: control-plane
- role: worker
- role: worker
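This kind config describes one control-plane node and two workers. Assuming it is saved as kind-config.yaml (the filename is not visible in this view), it can be used directly:

kind create cluster --config kind-config.yaml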


@@ -1,47 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# os::util::absolute_path returns the absolute path to the directory provided
function os::util::absolute_path() {
local relative_path="$1"
local absolute_path
pushd "${relative_path}" >/dev/null
relative_path="$( pwd )"
if [[ -h "${relative_path}" ]]; then
absolute_path="$( readlink "${relative_path}" )"
else
absolute_path="${relative_path}"
fi
popd >/dev/null
echo "${absolute_path}"
}
readonly -f os::util::absolute_path
# find the absolute path to the root of the Origin source tree
init_source="$( dirname "${BASH_SOURCE}" )/../.."
OS_ROOT="$( os::util::absolute_path "${init_source}" )"
export OS_ROOT
cd "${OS_ROOT}"
PRJ_PREFIX="sigs.k8s.io/descheduler"
OS_OUTPUT_BINPATH="${OS_ROOT}/_output/bin"


@@ -1,9 +0,0 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
${OS_OUTPUT_BINPATH}/conversion-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.conversion


@@ -1,10 +0,0 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
${OS_OUTPUT_BINPATH}/deepcopy-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig,${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.deepcopy


@@ -1,10 +0,0 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
${OS_OUTPUT_BINPATH}/defaulter-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.defaults
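The three generator wrappers above share the same build-then-run pattern, so a regeneration pass typically just runs each in turn. The script names below are placeholders, since the actual filenames are not visible in this view:

for gen in conversions deepcopies defaulters; do
  "./hack/update-generated-${gen}.sh"
done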


@@ -1,49 +0,0 @@
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi
cd "${DESCHEDULER_ROOT}"
find_files() {
find . -not \( \
\( \
-wholename './output' \
-o -wholename './_output' \
-o -wholename './release' \
-o -wholename './target' \
-o -wholename './.git' \
-o -wholename '*/third_party/*' \
-o -wholename '*/Godeps/*' \
-o -wholename '*/vendor/*' \
\) -prune \
\) -name '*.go'
}
GOFMT="gofmt -s -w"
find_files | xargs $GOFMT -l


@@ -1,54 +0,0 @@
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi
cd "${DESCHEDULER_ROOT}"
find_files() {
find . -not \( \
\( \
-wholename './output' \
-o -wholename './_output' \
-o -wholename './release' \
-o -wholename './target' \
-o -wholename './.git' \
-o -wholename '*/third_party/*' \
-o -wholename '*/Godeps/*' \
-o -wholename '*/vendor/*' \
\) -prune \
\) -name '*.go'
}
GOFMT="gofmt -s"
bad_files=$(find_files | xargs $GOFMT -l)
if [[ -n "${bad_files}" ]]; then
echo "!!! '$GOFMT' needs to be run on the following files: "
echo "${bad_files}"
exit 1
fi
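The script above this one rewrites files in place (gofmt -s -w); this variant only lists offenders and exits non-zero, which is what a CI check wants. To see the exact diffs behind a failure, gofmt's diff mode can reuse the same file set (a sketch built on the find_files helper defined above):

find_files | xargs gofmt -s -d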

index.yaml (new file, 762 lines)

@@ -0,0 +1,762 @@
apiVersion: v1
entries:
descheduler:
- apiVersion: v1
appVersion: 0.34.0
created: "2025-10-30T16:41:34.519386946Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 0fb91a600418400c8581eae65adab602ff2589104540306e5c6936a50fdd5bd6
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: sig-scheduling@kubernetes.io
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.34.0/descheduler-0.34.0.tgz
version: 0.34.0
- apiVersion: v1
appVersion: 0.33.0
created: "2025-05-04T19:49:46.589102937Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 1f32da32dedc6c066f197637e1fbd10546e9c6987322b88e71add1c9765c978f
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: sig-scheduling@kubernetes.io
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.33.0/descheduler-0.33.0.tgz
version: 0.33.0
- apiVersion: v1
appVersion: 0.32.2
created: "2025-02-11T04:46:21.248724497Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 011205c924597543fe93bf6ce66d75a33bc6a7d1a029eedd4c0390a16e57e264
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.32.2/descheduler-0.32.2.tgz
version: 0.32.2
- apiVersion: v1
appVersion: 0.32.1
created: "2025-01-06T23:08:49.134597641Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: e104f5e14b8ef7f5166912e1e9a61785977f5f17ca95c43e744d0685528f8436
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.32.1/descheduler-0.32.1.tgz
version: 0.32.1
- apiVersion: v1
appVersion: 0.32.0
created: "2025-01-02T23:34:02.843431016Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 4ca12ecc4916ae1c47ae9b03571e371766e6b6846cc1821299bbd2da75042bb3
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.32.0/descheduler-0.32.0.tgz
version: 0.32.0
- apiVersion: v1
appVersion: 0.31.0
created: "2024-09-09T22:59:21.130329884Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 7e9d95713e2384b0976e6ea89742550c83c9adf52005e9c0cc9da3f04477d530
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.31.0/descheduler-0.31.0.tgz
version: 0.31.0
- apiVersion: v1
appVersion: 0.30.2
created: "2024-11-20T15:09:49.215945303Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 7608d3966b7d1fc08fe31cdffc283dc2a4d7a473fe63efa1672a5926b81774c2
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.30.2/descheduler-0.30.2.tgz
version: 0.30.2
- apiVersion: v1
appVersion: 0.30.1
created: "2024-06-05T12:06:23.870221344Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 8ab41d27011119ff62abfb86fa9253d66a1c74b7247d32c5786a96d3fd11abd2
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.30.1/descheduler-0.30.1.tgz
version: 0.30.1
- apiVersion: v1
appVersion: 0.30.0
created: "2024-05-20T14:03:13.843460018Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 7599edc8d0c09821e290438a850509d762a6ad79d48024ab5c9ec56e70cc4229
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.30.0/descheduler-0.30.0.tgz
version: 0.30.0
- apiVersion: v1
appVersion: 0.29.0
created: "2024-01-02T18:54:20.992594156Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: f6157f5d02bf90e8f84e74a8d7ccb1660b802d4aa710fdb32d9398eaccd50f67
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.29.0/descheduler-0.29.0.tgz
version: 0.29.0
- apiVersion: v1
appVersion: 0.28.1
created: "2023-11-29T17:22:04.773831615Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 33ccfc268727e9b129254bc450c192d3084d56967ceb315e4a1d6d744bc204ea
home: https://github.com/kubernetes-sigs/descheduler
icon: https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/assets/logo/descheduler-stacked-color.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.28.1/descheduler-0.28.1.tgz
version: 0.28.1
- apiVersion: v1
appVersion: 0.28.0
created: "2023-08-24T13:07:51.303386087Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 50efd424df52a1bfd6e40900e634394e66db4a901ff797b595481855dcb01584
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.28.0/descheduler-0.28.0.tgz
version: 0.28.0
- apiVersion: v1
appVersion: 0.27.1
created: "2023-05-31T08:18:14.881530101Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: c7d93a7fc65f8311bd1853893bd4c5b3f3f4dabcf182c896846752d99720c98b
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.27.1/descheduler-0.27.1.tgz
version: 0.27.1
- apiVersion: v1
appVersion: 0.27.0
created: "2023-05-05T14:14:44.725587487Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: dd06be2789590c070b204e0f7f0910ec111d90a13e8b5a46e3b80b015e458b11
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.27.0/descheduler-0.27.0.tgz
version: 0.27.0
- apiVersion: v1
appVersion: 0.26.1
created: "2023-04-04T17:34:17.284123888Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 391167e6741cc412bc12bc0e5a6ab513a63528fa4d161396e2c14f017eb1b7b3
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.26.1/descheduler-0.26.1.tgz
version: 0.26.1
- apiVersion: v1
appVersion: 0.26.0
created: "2023-01-17T14:42:00.271072093Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 4bb1285dcb65814059b13d268d0f230a0993527fddc652a0b82a2039aed7a1b4
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.26.0/descheduler-0.26.0.tgz
version: 0.26.0
- apiVersion: v1
appVersion: 0.25.1
created: "2022-10-17T21:53:53.644159593Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 68b973e5294bcc1735ec7cfb9c82769c4dccc4a2ef75d8cf2a3244ebaaa73b5d
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.25.2/descheduler-0.25.2.tgz
version: 0.25.2
- apiVersion: v1
appVersion: 0.25.1
created: "2022-09-27T15:02:20.445257034Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 758d970bf9fc7d575f3a171affd3a60475dafba6405cfcfd98d42062582a75ec
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.25.1/descheduler-0.25.1.tgz
version: 0.25.1
- apiVersion: v1
appVersion: 0.25.0
created: "2022-09-15T16:33:37.881987127Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 8aa3f1af4a9f3ab49dd9c177c88596993d1a1481b83318ebeff3a49e49714bdd
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.25.0/descheduler-0.25.0.tgz
version: 0.25.0
- apiVersion: v1
appVersion: 0.24.1
created: "2022-05-31T13:39:20.603755914Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 3cec16a47c2416e5f5946eae473ce7fd073dfeb2ca49fa87e900886c57707fcf
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.24.1/descheduler-0.24.1.tgz
version: 0.24.1
- apiVersion: v1
appVersion: 0.24.0
created: "2022-05-23T14:14:10.037216155Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 2cd39048ebca1bf37d99be4f659364bedaa5c6e6cfb62640914e327884945b24
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.24.0/descheduler-0.24.0.tgz
version: 0.24.0
- apiVersion: v1
appVersion: 0.23.1
created: "2022-02-28T18:56:10.944563703Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 7c6c08ca5d1a0be8632e3596d721f5fc99117e3a422642cb908f5a2d2e355059
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.23.2/descheduler-0.23.2.tgz
version: 0.23.2
- apiVersion: v1
appVersion: 0.23.0
created: "2022-02-09T13:34:50.093591254Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: a862faecae9eae025558d69716ebd67e467cc0316a431487d9f64dd8a60b7848
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.23.1/descheduler-0.23.1.tgz
version: 0.23.1
- apiVersion: v1
appVersion: 0.23.0
created: "2022-02-03T20:23:32.21492446Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 224b16599938ee331612c01dd5889a88a3ed3ef1872a9e108621aba3fb608713
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.23.0/descheduler-0.23.0.tgz
version: 0.23.0
- apiVersion: v1
appVersion: 0.22.1
created: "2021-09-29T16:37:16.381510827Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 83a407bbf981c300b6c75df148045be56797517cfcf7b744dfe8cbd3aa946c56
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.22.1/descheduler-0.22.1.tgz
version: 0.22.1
- apiVersion: v1
appVersion: 0.22.0
created: "2021-09-08T20:44:21.974382529Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 9dd645e058c66eba711acd604026dbbb546ccd5366253464bcea38d63c51ed69
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.22.0/descheduler-0.22.0.tgz
version: 0.22.0
- apiVersion: v1
appVersion: 0.21.0
created: "2021-06-08T17:43:48.212056769Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: bb951c9c5e2c76855e0bb1a3a20c5238963acd515d7c250e3b27ccefa002bec2
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.21.0/descheduler-0.21.0.tgz
version: 0.21.0
- apiVersion: v1
appVersion: 0.20.0
created: "2020-12-10T15:23:17.367104788Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: f8f0177966994e3cdb61385069ebba3f8f658107ab800a382b30d5fe809789d0
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.20.0/descheduler-0.20.0.tgz
version: 0.20.0
- apiVersion: v1
appVersion: 0.19.0
created: "2020-12-09T01:47:49.250755956Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 1331ce7da686311e18e552edd8353786694049a747ccab6616362e892579e766
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.19.2/descheduler-0.19.2.tgz
version: 0.19.2
- apiVersion: v1
appVersion: 0.18.0
created: "2020-12-08T19:34:02.786640856Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: 55b8b8e015bfa3d0794ca1d7316b2b412211fa6ccbd7649a80f7a1d6ec097f26
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.18.2/descheduler-0.18.2.tgz
version: 0.18.2
descheduler-helm-chart:
- apiVersion: v1
appVersion: 0.19.0
created: "2020-12-08T19:22:08.294224978Z"
deprecated: true
description: DEPRECATED - Descheduler for Kubernetes is used to rebalance clusters
by evicting pods that can potentially be scheduled on better nodes. In the current
implementation, descheduler does not schedule replacement of evicted pods but
relies on the default scheduler for that.
digest: b77ac9bb15ec71f7006d7d4faefffbb29b0cbcdabbef6389c858690beb12e529
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
name: descheduler-helm-chart
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.19.1/descheduler-helm-chart-0.19.1.tgz
version: 0.19.1
- apiVersion: v1
appVersion: 0.19.0
created: "2020-09-01T16:59:22.515242595Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: dc70fc1ac8b57872e54c3406000617d4cbd0b2a9653abf0054c345c471b4be1c
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler-helm-chart
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.19.0/descheduler-helm-chart-0.19.0.tgz
version: 0.19.0
- apiVersion: v1
appVersion: 0.18.0
created: "2020-07-23T18:29:32.622241657Z"
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the
default scheduler for that.
digest: d2a7516d4b2bbd288d5016b935d1a0ce4db9cc40c4f8a6981fe5075d71f4bcda
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: steve.hipwell@github.com
name: stevehipwell
name: descheduler-helm-chart
sources:
- https://github.com/kubernetes-sigs/descheduler
urls:
- https://github.com/kubernetes-sigs/descheduler/releases/download/descheduler-helm-chart-0.18.1/descheduler-helm-chart-0.18.1.tgz
version: 0.18.1
generated: "2025-10-30T16:41:34.519616538Z"


@@ -1,28 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: descheduler-policy-configmap
  namespace: kube-system
data:
  policy.yaml: |
    apiVersion: "descheduler/v1alpha1"
    kind: "DeschedulerPolicy"
    strategies:
      "RemoveDuplicates":
        enabled: true
      "RemovePodsViolatingInterPodAntiAffinity":
        enabled: true
      "LowNodeUtilization":
        enabled: true
        params:
          nodeResourceUtilizationThresholds:
            thresholds:
              "cpu": 20
              "memory": 20
              "pods": 20
            targetThresholds:
              "cpu": 50
              "memory": 50
              "pods": 50


@@ -1,35 +0,0 @@
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: descheduler-cronjob
  namespace: kube-system
spec:
  schedule: "*/2 * * * *"
  concurrencyPolicy: "Forbid"
  jobTemplate:
    spec:
      template:
        metadata:
          name: descheduler-pod
        spec:
          priorityClassName: system-cluster-critical
          containers:
          - name: descheduler
            image: us.gcr.io/k8s-artifacts-prod/descheduler:v0.10.0
            volumeMounts:
            - mountPath: /policy-dir
              name: policy-volume
            command:
            - "/bin/descheduler"
            args:
            - "--policy-config-file"
            - "/policy-dir/policy.yaml"
            - "--v"
            - "3"
          restartPolicy: "Never"
          serviceAccountName: descheduler-sa
          volumes:
          - name: policy-volume
            configMap:
              name: descheduler-policy-configmap


@@ -1,33 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: descheduler-job
namespace: kube-system
spec:
parallelism: 1
completions: 1
template:
metadata:
name: descheduler-pod
spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: us.gcr.io/k8s-artifacts-prod/descheduler:v0.10.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--v"
- "3"
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap


@@ -1,40 +0,0 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: descheduler-cluster-role
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "watch", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
  resources: ["pods/eviction"]
  verbs: ["create"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: descheduler-sa
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: descheduler-cluster-role-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: descheduler-cluster-role
subjects:
- name: descheduler-sa
  kind: ServiceAccount
  namespace: kube-system
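Taken together, these manifests run the descheduler either once (Job) or every two minutes (CronJob). The RBAC objects and the policy ConfigMap must exist before either workload starts; note also that the CronJob above uses batch/v1beta1, which newer Kubernetes releases have removed in favor of batch/v1. A minimal apply sequence, with placeholder filenames:

kubectl apply -f rbac.yaml
kubectl apply -f configmap.yaml
kubectl apply -f job.yaml    # or cronjob.yaml for the recurring variant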


@@ -1,19 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
package api // import "sigs.k8s.io/descheduler/pkg/api"


@@ -1,59 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
Scheme = runtime.NewScheme()
)
// GroupName is the group name used in this package
const GroupName = "descheduler"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
func init() {
if err := addKnownTypes(scheme.Scheme); err != nil {
panic(err)
}
}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&DeschedulerPolicy{},
)
return nil
}


@@ -1,60 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DeschedulerPolicy struct {
metav1.TypeMeta
// Strategies
Strategies StrategyList
}
type StrategyName string
type StrategyList map[StrategyName]DeschedulerStrategy
type DeschedulerStrategy struct {
// Enabled or disabled
Enabled bool
// Weight
Weight int
// Strategy parameters
Params StrategyParameters
}
// Only one of its members may be specified
type StrategyParameters struct {
NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds
NodeAffinityType []string
}
type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage
type NodeResourceUtilizationThresholds struct {
Thresholds ResourceThresholds
TargetThresholds ResourceThresholds
NumberOfNodes int
}


@@ -1,23 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import "k8s.io/apimachinery/pkg/runtime"
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}


@@ -1,24 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/api
// +k8s:defaulter-gen=TypeMeta
// Package v1alpha1 is the v1alpha1 version of the descheduler API
// +groupName=descheduler
package v1alpha1 // import "sigs.k8s.io/descheduler/pkg/api/v1alpha1"


@@ -1,61 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
localSchemeBuilder = &SchemeBuilder
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name used in this package
const GroupName = "descheduler"
const GroupVersion = "v1alpha1"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this will get cleaned up when the scheme types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&DeschedulerPolicy{},
)
return nil
}


@@ -1,60 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DeschedulerPolicy struct {
metav1.TypeMeta `json:",inline"`
// Strategies
Strategies StrategyList `json:"strategies,omitempty"`
}
type StrategyName string
type StrategyList map[StrategyName]DeschedulerStrategy
type DeschedulerStrategy struct {
// Enabled or disabled
Enabled bool `json:"enabled,omitempty"`
// Weight
Weight int `json:"weight,omitempty"`
// Strategy parameters
Params StrategyParameters `json:"params,omitempty"`
}
// Only one of its members may be specified
type StrategyParameters struct {
NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
}
type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage
type NodeResourceUtilizationThresholds struct {
Thresholds ResourceThresholds `json:"thresholds,omitempty"`
TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
NumberOfNodes int `json:"numberOfNodes,omitempty"`
}
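A policy file exercising these v1alpha1 fields, including the nodeAffinityType parameter that the ConfigMap earlier leaves unused, might look like the following (the strategy name comes from the descheduler documentation, not from this file):

cat <<EOF > policy.yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
EOF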


@@ -1,177 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*DeschedulerStrategy)(nil), (*api.DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(a.(*DeschedulerStrategy), b.(*api.DeschedulerStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.DeschedulerStrategy)(nil), (*DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(a.(*api.DeschedulerStrategy), b.(*DeschedulerStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*NodeResourceUtilizationThresholds)(nil), (*api.NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(a.(*NodeResourceUtilizationThresholds), b.(*api.NodeResourceUtilizationThresholds), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.NodeResourceUtilizationThresholds)(nil), (*NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(a.(*api.NodeResourceUtilizationThresholds), b.(*NodeResourceUtilizationThresholds), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*StrategyParameters)(nil), (*api.StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(a.(*StrategyParameters), b.(*api.StrategyParameters), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.StrategyParameters)(nil), (*StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(a.(*api.StrategyParameters), b.(*StrategyParameters), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
return nil
}
// Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy is an autogenerated conversion function.
func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
return autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in, out, s)
}
func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
return nil
}
// Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy is an autogenerated conversion function.
func Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
return autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in, out, s)
}
func autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Weight = in.Weight
if err := Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(&in.Params, &out.Params, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy is an autogenerated conversion function.
func Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
return autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in, out, s)
}
func autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Weight = in.Weight
if err := Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(&in.Params, &out.Params, s); err != nil {
return err
}
return nil
}
// Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy is an autogenerated conversion function.
func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
}
func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
out.NumberOfNodes = in.NumberOfNodes
return nil
}
// Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds is an autogenerated conversion function.
func Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
return autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in, out, s)
}
func autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
out.Thresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
out.TargetThresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
out.NumberOfNodes = in.NumberOfNodes
return nil
}
// Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds is an autogenerated conversion function.
func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
return autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in, out, s)
}
func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
if err := Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
return err
}
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
return nil
}
// Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters is an autogenerated conversion function.
func Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
return autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in, out, s)
}
func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
if err := Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
return err
}
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
return nil
}
// Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters is an autogenerated conversion function.
func Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
return autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in, out, s)
}
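
Taken together, the generated helpers above round-trip a strategy between the versioned v1alpha1 types and the internal api types. A minimal sketch of calling one directly (import paths assumed from this repo's layout, e.g. sigs.k8s.io/descheduler/pkg/api/v1alpha1; passing a nil conversion.Scope is an assumption that holds here because none of these fields need scope-aware conversion):

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
	// Versioned strategy as it would arrive from a decoded policy file.
	in := &v1alpha1.DeschedulerStrategy{Enabled: true, Weight: 10}
	out := &api.DeschedulerStrategy{}

	// The generated conversions above never dereference the scope for
	// these fields, so nil is tolerated in this sketch.
	if err := v1alpha1.Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in, out, nil); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Println(out.Enabled, out.Weight) // true 10
}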


@@ -1,170 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Strategies != nil {
in, out := &in.Strategies, &out.Strategies
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
if in == nil {
return nil
}
out := new(DeschedulerPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
*out = *in
in.Params.DeepCopyInto(&out.Params)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
if in == nil {
return nil
}
out := new(DeschedulerStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
*out = *in
if in.Thresholds != nil {
in, out := &in.Thresholds, &out.Thresholds
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.TargetThresholds != nil {
in, out := &in.TargetThresholds, &out.TargetThresholds
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
if in == nil {
return nil
}
out := new(NodeResourceUtilizationThresholds)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
{
in := &in
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
if in == nil {
return nil
}
out := new(ResourceThresholds)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in StrategyList) DeepCopyInto(out *StrategyList) {
{
in := &in
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
func (in StrategyList) DeepCopy() StrategyList {
if in == nil {
return nil
}
out := new(StrategyList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = *in
in.NodeResourceUtilizationThresholds.DeepCopyInto(&out.NodeResourceUtilizationThresholds)
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
if in == nil {
return nil
}
out := new(StrategyParameters)
in.DeepCopyInto(out)
return out
}
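
The point of these generated deep copies is that nested maps and slices are re-allocated rather than aliased. A small sketch (package path assumed as sigs.k8s.io/descheduler/pkg/api/v1alpha1):

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
	orig := &v1alpha1.NodeResourceUtilizationThresholds{
		Thresholds: v1alpha1.ResourceThresholds{"cpu": 20},
	}
	cp := orig.DeepCopy()

	// DeepCopyInto rebuilt the underlying map, so mutating the copy
	// does not leak back into the original.
	cp.Thresholds["cpu"] = 90
	fmt.Println(orig.Thresholds["cpu"], cp.Thresholds["cpu"]) // 20 90
}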


@@ -1,32 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}


@@ -1,170 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package api
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Strategies != nil {
in, out := &in.Strategies, &out.Strategies
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
if in == nil {
return nil
}
out := new(DeschedulerPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
*out = *in
in.Params.DeepCopyInto(&out.Params)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
if in == nil {
return nil
}
out := new(DeschedulerStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
*out = *in
if in.Thresholds != nil {
in, out := &in.Thresholds, &out.Thresholds
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.TargetThresholds != nil {
in, out := &in.TargetThresholds, &out.TargetThresholds
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
if in == nil {
return nil
}
out := new(NodeResourceUtilizationThresholds)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
{
in := &in
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
if in == nil {
return nil
}
out := new(ResourceThresholds)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in StrategyList) DeepCopyInto(out *StrategyList) {
{
in := &in
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
func (in StrategyList) DeepCopy() StrategyList {
if in == nil {
return nil
}
out := new(StrategyList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = *in
in.NodeResourceUtilizationThresholds.DeepCopyInto(&out.NodeResourceUtilizationThresholds)
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
if in == nil {
return nil
}
out := new(StrategyParameters)
in.DeepCopyInto(out)
return out
}


@@ -1,19 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
package componentconfig // import "sigs.k8s.io/descheduler/pkg/apis/componentconfig"


@@ -1,58 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package componentconfig
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name used in this package
const GroupName = "deschedulercomponentconfig"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
func init() {
if err := addKnownTypes(scheme.Scheme); err != nil {
panic(err)
}
}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&DeschedulerConfiguration{},
)
return nil
}
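
The Kind and Resource helpers simply qualify unversioned names with the internal component-config group. A tiny sketch of what they yield (import path assumed from this repo's layout):

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)

func main() {
	// Both helpers attach GroupName ("deschedulercomponentconfig")
	// to an otherwise unqualified kind or resource name.
	fmt.Println(componentconfig.Kind("DeschedulerConfiguration").String())
	fmt.Println(componentconfig.Resource("deschedulerconfigurations").String())
}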


@@ -1,51 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package componentconfig
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DeschedulerConfiguration struct {
metav1.TypeMeta
// Time interval between consecutive descheduler runs
DeschedulingInterval time.Duration
// KubeconfigFile is the path to a kubeconfig file with authorization and master
// location information.
KubeconfigFile string
// PolicyConfigFile is the filepath to the descheduler policy configuration.
PolicyConfigFile string
// Dry run
DryRun bool
// Node selectors
NodeSelector string
// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode int
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool
}


@@ -1,23 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import "k8s.io/apimachinery/pkg/runtime"
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}


@@ -1,24 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/apis/componentconfig
// +k8s:defaulter-gen=TypeMeta
// Package v1alpha1 is the v1alpha1 version of the descheduler's componentconfig API
// +groupName=deschedulercomponentconfig
package v1alpha1 // import "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"


@@ -1,60 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
localSchemeBuilder = &SchemeBuilder
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name used in this package
const GroupName = "deschedulercomponentconfig"
const GroupVersion = "v1alpha1"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO: this will get cleaned up when the scheme types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&DeschedulerConfiguration{},
)
return nil
}


@@ -1,51 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DeschedulerConfiguration struct {
metav1.TypeMeta `json:",inline"`
// Time interval between consecutive descheduler runs
DeschedulingInterval time.Duration `json:"deschedulingInterval,omitempty"`
// KubeconfigFile is the path to a kubeconfig file with authorization and master
// location information.
KubeconfigFile string `json:"kubeconfigFile"`
// PolicyConfigFile is the filepath to the descheduler policy configuration.
PolicyConfigFile string `json:"policyConfigFile,omitempty"`
// Dry run
DryRun bool `json:"dryRun,omitempty"`
// Node selectors
NodeSelector string `json:"nodeSelector,omitempty"`
// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
}
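
Since this is the versioned type, its JSON tags determine the serialized shape of a component config. A hedged sketch of marshaling one (note that with encoding/json a time.Duration renders as integer nanoseconds, not a string like "5m"):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
)

func main() {
	cfg := v1alpha1.DeschedulerConfiguration{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "deschedulercomponentconfig/v1alpha1",
			Kind:       "DeschedulerConfiguration",
		},
		DeschedulingInterval: 5 * time.Minute,
		PolicyConfigFile:     "/policy-dir/policy.yaml",
		DryRun:               true,
	}
	b, _ := json.MarshalIndent(cfg, "", "  ")
	// deschedulingInterval appears as 300000000000 (nanoseconds).
	fmt.Println(string(b))
}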


@@ -1,81 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
time "time"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
componentconfig "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*DeschedulerConfiguration)(nil), (*componentconfig.DeschedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(a.(*DeschedulerConfiguration), b.(*componentconfig.DeschedulerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*componentconfig.DeschedulerConfiguration)(nil), (*DeschedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(a.(*componentconfig.DeschedulerConfiguration), b.(*DeschedulerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(in *DeschedulerConfiguration, out *componentconfig.DeschedulerConfiguration, s conversion.Scope) error {
out.DeschedulingInterval = time.Duration(in.DeschedulingInterval)
out.KubeconfigFile = in.KubeconfigFile
out.PolicyConfigFile = in.PolicyConfigFile
out.DryRun = in.DryRun
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
return nil
}
// Convert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(in *DeschedulerConfiguration, out *componentconfig.DeschedulerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(in, out, s)
}
func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
out.DeschedulingInterval = time.Duration(in.DeschedulingInterval)
out.KubeconfigFile = in.KubeconfigFile
out.PolicyConfigFile = in.PolicyConfigFile
out.DryRun = in.DryRun
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
return nil
}
// Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration is an autogenerated conversion function.
func Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
return autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in, out, s)
}


@@ -1,50 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerConfiguration.
func (in *DeschedulerConfiguration) DeepCopy() *DeschedulerConfiguration {
if in == nil {
return nil
}
out := new(DeschedulerConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}


@@ -1,32 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}


@@ -1,50 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package componentconfig
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerConfiguration.
func (in *DeschedulerConfiguration) DeepCopy() *DeschedulerConfiguration {
if in == nil {
return nil
}
out := new(DeschedulerConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}


@@ -1,68 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"fmt"
clientset "k8s.io/client-go/kubernetes"
// Ensure all client auth plugins are loaded.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
func CreateClient(kubeconfig string) (clientset.Interface, error) {
var cfg *rest.Config
if len(kubeconfig) != 0 {
master, err := GetMasterFromKubeconfig(kubeconfig)
if err != nil {
return nil, fmt.Errorf("Failed to parse kubeconfig file: %v ", err)
}
cfg, err = clientcmd.BuildConfigFromFlags(master, kubeconfig)
if err != nil {
return nil, fmt.Errorf("Unable to build config: %v", err)
}
} else {
var err error
cfg, err = rest.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("Unable to build in cluster config: %v", err)
}
}
return clientset.NewForConfig(cfg)
}
func GetMasterFromKubeconfig(filename string) (string, error) {
config, err := clientcmd.LoadFromFile(filename)
if err != nil {
return "", err
}
context, ok := config.Contexts[config.CurrentContext]
if !ok {
return "", fmt.Errorf("Failed to get master address from kubeconfig")
}
if val, ok := config.Clusters[context.Cluster]; ok {
return val.Server, nil
}
return "", fmt.Errorf("Failed to get master address from kubeconfig")
}
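
A short sketch of driving CreateClient from an entry point: with an empty path it falls back to the in-cluster config, so outside a cluster this errors unless KUBECONFIG points at a kubeconfig file (the environment-variable lookup is this sketch's convention, not the descheduler's):

package main

import (
	"fmt"
	"os"

	"sigs.k8s.io/descheduler/pkg/descheduler/client"
)

func main() {
	c, err := client.CreateClient(os.Getenv("KUBECONFIG"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Any cheap discovery call works as a connectivity probe.
	v, err := c.Discovery().ServerVersion()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("connected to", v.GitVersion)
}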


@@ -1,84 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package descheduler
import (
"fmt"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/wait"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/utils"
)
func Run(rs *options.DeschedulerServer) error {
rsclient, err := client.CreateClient(rs.KubeconfigFile)
if err != nil {
return err
}
rs.Client = rsclient
deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile)
if err != nil {
return err
}
if deschedulerPolicy == nil {
return fmt.Errorf("deschedulerPolicy is nil")
}
return RunDeschedulerStrategies(rs, deschedulerPolicy)
}
func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy) error {
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
return err
}
stopChannel := make(chan struct{})
nodes, err := nodeutil.ReadyNodes(rs.Client, rs.NodeSelector, stopChannel)
if err != nil {
return err
}
if len(nodes) <= 1 {
klog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
return nil
}
nodePodCount := utils.InitializeNodePodCount(nodes)
wait.Until(func() {
strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], evictionPolicyGroupVersion, nodes, nodePodCount)
// If no interval was specified, close the stopChannel so the wait.Until loop ends after one iteration
if rs.DeschedulingInterval.Seconds() == 0 {
close(stopChannel)
}
}, rs.DeschedulingInterval, stopChannel)
return nil
}
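
The zero-interval trick above is easy to miss: wait.Until normally loops forever, and closing its own stop channel from inside the callback is what turns it into a run-once. A self-contained sketch of just that mechanism:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stop := make(chan struct{})
	interval := 0 * time.Second
	runs := 0
	wait.Until(func() {
		runs++
		// Mirrors RunDeschedulerStrategies: with no interval configured,
		// close the stop channel so the loop ends after one pass.
		if interval.Seconds() == 0 {
			close(stop)
		}
	}, interval, stop)
	fmt.Println("total passes:", runs) // 1
}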


@@ -1,68 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package evictions
import (
"fmt"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
)
func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
if dryRun {
return true, nil
}
deleteOptions := &metav1.DeleteOptions{}
// GracePeriodSeconds ?
eviction := &policy.Eviction{
TypeMeta: metav1.TypeMeta{
APIVersion: policyGroupVersion,
Kind: eutils.EvictionKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
},
DeleteOptions: deleteOptions,
}
err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
if err == nil {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.V(3).Infof)
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: client.CoreV1().Events(pod.Namespace)})
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
r.Event(pod, v1.EventTypeNormal, "Descheduled", "pod evicted by sigs.k8s.io/descheduler")
return true, nil
} else if apierrors.IsTooManyRequests(err) {
return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
} else if apierrors.IsNotFound(err) {
return true, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
} else {
return false, err
}
}
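
A hedged sketch of exercising EvictPod against a fake clientset; the policy group version would normally come from SupportEviction, and dryRun=true returns before any API call is issued, which makes it a safe probe:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	client := fake.NewSimpleClientset(pod)

	// dryRun=true short-circuits: no eviction is issued, no event recorded.
	evicted, err := evictions.EvictPod(client, pod, "policy/v1beta1", true)
	fmt.Println(evicted, err) // true <nil>
}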


@@ -1,64 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package evictions
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/test"
"testing"
)
func TestEvictPod(t *testing.T) {
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
pod1 := test.BuildTestPod("p1", 400, 0, "node1")
tests := []struct {
description string
node *v1.Node
pod *v1.Pod
pods []v1.Pod
want bool
}{
{
description: "test pod eviction - pod present",
node: node1,
pod: pod1,
pods: []v1.Pod{*pod1},
want: true,
},
{
description: "test pod eviction - pod absent",
node: node1,
pod: pod1,
pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1"), *test.BuildTestPod("p3", 450, 0, "node1")},
want: true,
},
}
for _, test := range tests {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: test.pods}, nil
})
got, _ := EvictPod(fakeClient, test.pod, "v1", false)
if got != test.want {
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
}
}
}


@@ -1,58 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
clientset "k8s.io/client-go/kubernetes"
)
const (
EvictionKind = "Eviction"
EvictionSubresource = "pods/eviction"
)
// SupportEviction uses the Discovery API to find out if the server supports the eviction subresource.
// If supported, it returns the policy group version; otherwise it returns "".
func SupportEviction(client clientset.Interface) (string, error) {
discoveryClient := client.Discovery()
groupList, err := discoveryClient.ServerGroups()
if err != nil {
return "", err
}
foundPolicyGroup := false
var policyGroupVersion string
for _, group := range groupList.Groups {
if group.Name == "policy" {
foundPolicyGroup = true
policyGroupVersion = group.PreferredVersion.GroupVersion
break
}
}
if !foundPolicyGroup {
return "", nil
}
resourceList, err := discoveryClient.ServerResourcesForGroupVersion("v1")
if err != nil {
return "", err
}
for _, resource := range resourceList.APIResources {
if resource.Name == EvictionSubresource && resource.Kind == EvictionKind {
return policyGroupVersion, nil
}
}
return "", nil
}
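
A quick sketch of what SupportEviction reports when discovery advertises no policy group; a bare fake clientset registers no groups, so the result is the empty string with a nil error, which callers treat as "skip evictions":

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes/fake"

	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
)

func main() {
	client := fake.NewSimpleClientset()
	gv, err := eutils.SupportEviction(client)
	// An empty group version means evictions are unsupported.
	fmt.Printf("eviction group/version=%q err=%v\n", gv, err)
}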


@@ -1,164 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
"sigs.k8s.io/descheduler/pkg/utils"
)
// ReadyNodes returns ready nodes irrespective of whether they are
// schedulable or not.
func ReadyNodes(client clientset.Interface, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
ns, err := labels.Parse(nodeSelector)
if err != nil {
return []*v1.Node{}, err
}
var nodes []*v1.Node
nl := GetNodeLister(client, stopChannel)
if nl != nil {
// err is defined above
if nodes, err = nl.List(ns); err != nil {
return []*v1.Node{}, err
}
}
if len(nodes) == 0 {
klog.V(2).Infof("node lister returned empty list, now fetch directly")
nItems, err := client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector})
if err != nil {
return []*v1.Node{}, err
}
if nItems == nil || len(nItems.Items) == 0 {
return []*v1.Node{}, nil
}
for i := range nItems.Items {
node := nItems.Items[i]
nodes = append(nodes, &node)
}
}
readyNodes := make([]*v1.Node, 0, len(nodes))
for _, node := range nodes {
if IsReady(node) {
readyNodes = append(readyNodes, node)
}
}
return readyNodes, nil
}
func GetNodeLister(client clientset.Interface, stopChannel <-chan struct{}) corelisters.NodeLister {
if stopChannel == nil {
return nil
}
listWatcher := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
nodeLister := corelisters.NewNodeLister(store)
reflector := cache.NewReflector(listWatcher, &v1.Node{}, store, time.Hour)
go reflector.Run(stopChannel)
// Sleep briefly so the reflector has a chance to sync and listing works; the duration is arbitrary
time.Sleep(100 * time.Millisecond)
return nodeLister
}
// IsReady checks if the descheduler could run against given node.
func IsReady(node *v1.Node) bool {
for i := range node.Status.Conditions {
cond := &node.Status.Conditions[i]
// We consider the node for scheduling only when its:
// - NodeReady condition status is ConditionTrue,
// - NodeOutOfDisk condition status is ConditionFalse,
// - NodeNetworkUnavailable condition status is ConditionFalse.
if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
klog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
return false
} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
return false
} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
return false
}*/
}
// Ignore nodes that are marked unschedulable
/*if node.Spec.Unschedulable {
klog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)
return false
}*/
return true
}
// IsNodeUnschedulable checks if the node is unschedulable. This is a helper used when evaluating
// underutilized nodes so that unschedulable ones are not accounted for.
func IsNodeUnschedulable(node *v1.Node) bool {
return node.Spec.Unschedulable
}
// PodFitsAnyNode checks if the given pod fits any of the given nodes, based on
// multiple criteria, such as the pod's node selector matching the node's labels
// and the node being schedulable.
func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
for _, node := range nodes {
ok, err := utils.PodMatchNodeSelector(pod, node)
if err != nil || !ok {
continue
}
if !IsNodeUnschedulable(node) {
klog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
return true
}
return false
}
return false
}
// PodFitsCurrentNode checks if the given pod fits on the given node if the pod
// node selector matches the node label.
func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
ok, err := utils.PodMatchNodeSelector(pod, node)
if err != nil {
klog.Error(err)
return false
}
if !ok {
klog.V(1).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
return false
}
klog.V(3).Infof("Pod %v fits on node %v", pod.Name, node.Name)
return true
}
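
A minimal sketch of ReadyNodes with a nil stop channel: GetNodeLister then returns nil, so the function falls back to listing nodes straight from the API server (here a fake clientset with no nodes matching the selector):

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes/fake"

	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
)

func main() {
	client := fake.NewSimpleClientset()
	nodes, err := nodeutil.ReadyNodes(client, "type=compute", nil)
	// No nodes carry the type=compute label, so the list is empty.
	fmt.Println(len(nodes), err) // 0 <nil>
}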


@@ -1,342 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/descheduler/test"
)
func TestReadyNodes(t *testing.T) {
node1 := test.BuildTestNode("node2", 1000, 2000, 9)
node2 := test.BuildTestNode("node3", 1000, 2000, 9)
node2.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}}
node3 := test.BuildTestNode("node4", 1000, 2000, 9)
node3.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}}
node4 := test.BuildTestNode("node5", 1000, 2000, 9)
node4.Spec.Unschedulable = true
node5 := test.BuildTestNode("node6", 1000, 2000, 9)
node5.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}
if !IsReady(node1) {
t.Errorf("Expected %v to be ready", node1.Name)
}
if !IsReady(node2) {
t.Errorf("Expected %v to be ready", node2.Name)
}
if !IsReady(node3) {
t.Errorf("Expected %v to be ready", node3.Name)
}
if !IsReady(node4) {
t.Errorf("Expected %v to be ready", node4.Name)
}
if IsReady(node5) {
t.Errorf("Expected %v not to be ready", node5.Name)
}
}
func TestReadyNodesWithNodeSelector(t *testing.T) {
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
node1.Labels = map[string]string{"type": "compute"}
node2 := test.BuildTestNode("node2", 1000, 2000, 9)
node2.Labels = map[string]string{"type": "infra"}
fakeClient := fake.NewSimpleClientset(node1, node2)
nodeSelector := "type=compute"
nodes, _ := ReadyNodes(fakeClient, nodeSelector, nil)
if nodes[0].Name != "node1" {
t.Errorf("Expected node1, got %s", nodes[0].Name)
}
}
func TestIsNodeUnschedulable(t *testing.T) {
tests := []struct {
description string
node *v1.Node
IsUnSchedulable bool
}{
{
description: "Node is expected to be schedulable",
node: &v1.Node{
Spec: v1.NodeSpec{Unschedulable: false},
},
IsUnSchedulable: false,
},
{
description: "Node is not expected to be schedulable because of unschedulable field",
node: &v1.Node{
Spec: v1.NodeSpec{Unschedulable: true},
},
IsUnSchedulable: true,
},
}
for _, test := range tests {
actualUnSchedulable := IsNodeUnschedulable(test.node)
if actualUnSchedulable != test.IsUnSchedulable {
t.Errorf("Test %#v failed", test.description)
}
}
}
func TestPodFitsCurrentNode(t *testing.T) {
nodeLabelKey := "kubernetes.io/desiredNode"
nodeLabelValue := "yes"
tests := []struct {
description string
pod *v1.Pod
node *v1.Node
success bool
}{
{
description: "Pod with nodeAffinity set, expected to fit the node",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
},
},
},
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: nodeLabelValue,
},
},
},
success: true,
},
{
description: "Pod with nodeAffinity set, not expected to fit the node",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
},
},
},
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: "no",
},
},
},
success: false,
},
}
for _, tc := range tests {
actual := PodFitsCurrentNode(tc.pod, tc.node)
if actual != tc.success {
t.Errorf("Test %#v failed", tc.description)
}
}
}
func TestPodFitsAnyNode(t *testing.T) {
nodeLabelKey := "kubernetes.io/desiredNode"
nodeLabelValue := "yes"
tests := []struct {
description string
pod *v1.Pod
nodes []*v1.Node
success bool
}{
{
description: "Pod expected to fit one of the nodes",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
},
},
},
nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: nodeLabelValue,
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: "no",
},
},
},
},
success: true,
},
{
description: "Pod expected to fit none of the nodes",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
},
},
},
nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: "unfit1",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: "unfit2",
},
},
},
},
success: false,
},
{
description: "Nodes are unschedulable but labels match, should fail",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
},
},
},
nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: nodeLabelValue,
},
},
Spec: v1.NodeSpec{
Unschedulable: true,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: "no",
},
},
},
},
success: false,
},
}
for _, tc := range tests {
actual := PodFitsAnyNode(tc.pod, tc.nodes)
if actual != tc.success {
t.Errorf("Test %#v failed", tc.description)
}
}
}


@@ -1,125 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/descheduler/pkg/utils"
)
const (
evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
)
// IsEvictable checks if a pod is evictable or not.
func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
ownerRefList := OwnerRef(pod)
if !HaveEvictAnnotation(pod) && (IsMirrorPod(pod) || (!evictLocalStoragePods && IsPodWithLocalStorage(pod)) || len(ownerRefList) == 0 || IsDaemonsetPod(ownerRefList) || IsCriticalPod(pod)) {
return false
}
return true
}
// ListEvictablePodsOnNode returns the list of evictable pods on the given node.
func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
pods, err := ListPodsOnANode(client, node)
if err != nil {
return []*v1.Pod{}, err
}
evictablePods := make([]*v1.Pod, 0)
for _, pod := range pods {
if !IsEvictable(pod, evictLocalStoragePods) {
continue
}
evictablePods = append(evictablePods, pod)
}
return evictablePods, nil
}
func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
if err != nil {
return []*v1.Pod{}, err
}
podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(
metav1.ListOptions{FieldSelector: fieldSelector.String()})
if err != nil {
return []*v1.Pod{}, err
}
pods := make([]*v1.Pod, 0)
for i := range podList.Items {
pods = append(pods, &podList.Items[i])
}
return pods, nil
}
func IsCriticalPod(pod *v1.Pod) bool {
return utils.IsCriticalPod(pod)
}
func IsBestEffortPod(pod *v1.Pod) bool {
return utils.GetPodQOS(pod) == v1.PodQOSBestEffort
}
func IsBurstablePod(pod *v1.Pod) bool {
return utils.GetPodQOS(pod) == v1.PodQOSBurstable
}
func IsGuaranteedPod(pod *v1.Pod) bool {
return utils.GetPodQOS(pod) == v1.PodQOSGuaranteed
}
func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
for _, ownerRef := range ownerRefList {
if ownerRef.Kind == "DaemonSet" {
return true
}
}
return false
}
// IsMirrorPod checks whether the pod is a mirror pod.
func IsMirrorPod(pod *v1.Pod) bool {
return utils.IsMirrorPod(pod)
}
// HaveEvictAnnotation checks if the pod has the evict annotation set
func HaveEvictAnnotation(pod *v1.Pod) bool {
_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
return found
}
func IsPodWithLocalStorage(pod *v1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if volume.HostPath != nil || volume.EmptyDir != nil {
return true
}
}
return false
}
// OwnerRef returns the ownerRefList for the pod.
func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
return pod.ObjectMeta.GetOwnerReferences()
}
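
A sketch of the filter in action against a fake clientset (package path assumed as sigs.k8s.io/descheduler/pkg/descheduler/pod): a bare pod with no owner references fails IsEvictable (len(ownerRefList) == 0), so it is dropped from the result. The fake clientset does not evaluate the field selector, but the evictability filter alone already empties the list here:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"},
		Spec:       v1.PodSpec{NodeName: "node1"},
	}
	client := fake.NewSimpleClientset(node, pod)

	// p1 has no owner references, so IsEvictable rejects it.
	evictable, err := podutil.ListEvictablePodsOnNode(client, node, false)
	fmt.Println(len(evictable), err) // 0 <nil>
}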


@@ -1,237 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
func TestIsEvictable(t *testing.T) {
n1 := test.BuildTestNode("node1", 1000, 2000, 13)
type testCase struct {
pod *v1.Pod
runBefore func(*v1.Pod)
evictLocalStoragePods bool
result bool
}
testCases := []testCase{
{
pod: test.BuildTestPod("p1", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p2", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p3", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p4", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p5", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
},
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p6", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
},
evictLocalStoragePods: true,
result: true,
}, {
pod: test.BuildTestPod("p7", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p8", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
},
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p9", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p10", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
},
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p11", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p12", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
},
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p13", 400, 0, n1.Name),
runBefore: func(pod *v1.Pod) {
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
pod.Annotations = map[string]string{
"descheduler.alpha.kubernetes.io/evict": "true",
}
},
evictLocalStoragePods: false,
result: true,
},
}
for _, tc := range testCases {
tc.runBefore(tc.pod)
result := IsEvictable(tc.pod, tc.evictLocalStoragePods)
if result != tc.result {
t.Errorf("IsEvictable(%s) should return %t, but returned %t", tc.pod.Name, tc.result, result)
}
}
}
func TestPodTypes(t *testing.T) {
n1 := test.BuildTestNode("node1", 1000, 2000, 9)
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
// These won't be evicted.
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
// The following 4 pods won't get evicted.
// A daemonset.
p2.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p3.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p4.Annotations = test.GetMirrorPodAnnotation()
// A Critical Pod.
p5.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
p5.Spec.Priority = &priority
if !IsMirrorPod(p4) {
t.Errorf("Expected p4 to be a mirror pod.")
}
if !IsCriticalPod(p5) {
t.Errorf("Expected p5 to be a critical pod.")
}
if !IsPodWithLocalStorage(p3) {
t.Errorf("Expected p3 to be a pod with local storage.")
}
ownerRefList := OwnerRef(p2)
if !IsDaemonsetPod(ownerRefList) {
t.Errorf("Expected p2 to be a daemonset pod.")
}
ownerRefList = OwnerRef(p1)
if IsDaemonsetPod(ownerRefList) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
t.Errorf("Expected p1 to be a normal pod.")
}
}

View File

@@ -1,55 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package descheduler
import (
"fmt"
"io/ioutil"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
)
func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
if policyConfigFile == "" {
klog.V(1).Infof("policy config file not specified")
return nil, nil
}
policy, err := ioutil.ReadFile(policyConfigFile)
if err != nil {
return nil, fmt.Errorf("failed to read policy config file %q: %+v", policyConfigFile, err)
}
versionedPolicy := &v1alpha1.DeschedulerPolicy{}
decoder := scheme.Codecs.UniversalDecoder(v1alpha1.SchemeGroupVersion)
if err := runtime.DecodeInto(decoder, policy, versionedPolicy); err != nil {
return nil, fmt.Errorf("failed decoding descheduler's policy config %q: %v", policyConfigFile, err)
}
internalPolicy := &api.DeschedulerPolicy{}
if err := scheme.Scheme.Convert(versionedPolicy, internalPolicy, nil); err != nil {
return nil, fmt.Errorf("failed converting versioned policy to internal policy version: %v", err)
}
return internalPolicy, nil
}
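// Illustrative usage sketch (not part of the original file): load a policy
// and report which strategies are enabled. "policy.yaml" is a placeholder
// path, and the Strategies map is assumed from this API version.
func sketchLoadPolicy() {
	policy, err := LoadPolicyConfig("policy.yaml")
	if err != nil || policy == nil {
		klog.Errorf("could not load policy: %v", err)
		return
	}
	for name, strategy := range policy.Strategies {
		klog.Infof("strategy %v enabled: %v", name, strategy.Enabled)
	}
}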

View File

@@ -1,27 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
)
var (
Scheme = runtime.NewScheme()
Codecs = serializer.NewCodecFactory(Scheme)
)
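// Note: the Scheme above starts out empty; decoding versioned policies and
// converting them to the internal type only works after the API types are
// registered. A hedged sketch of that registration (the exact wiring lives
// in a separate file of this package; the imports and AddToScheme signatures
// are assumptions):
//
//	import (
//		utilruntime "k8s.io/apimachinery/pkg/util/runtime"
//		"sigs.k8s.io/descheduler/pkg/api"
//		"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
//	)
//
//	func init() {
//		utilruntime.Must(api.AddToScheme(Scheme))
//		utilruntime.Must(v1alpha1.AddToScheme(Scheme))
//	}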

View File

@@ -1,97 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"strings"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
//type creator string
type DuplicatePodsMap map[string][]*v1.Pod
// RemoveDuplicatePods removes duplicate pods on a node. This strategy evicts all but one pod in each
// duplicate group. Pods are duplicates of one another when they share the same owner (creator), kind,
// and namespace. As of now, this strategy does not evict daemonset pods, mirror pods, critical pods, or pods with local storage.
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount) {
if !strategy.Enabled {
return
}
deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
}
// deleteDuplicatePods evicts all but one pod in each duplicate group on every node and returns the total count of evicted pods.
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
podsEvicted := 0
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v", node.Name)
dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods)
for creator, pods := range dpm {
if len(pods) > 1 {
klog.V(1).Infof("%#v", creator)
// start at i = 1 so the first pod in each group is kept
for i := 1; i < len(pods); i++ {
if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
break
}
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
klog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
} else {
nodepodCount[node]++
klog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
}
}
}
}
podsEvicted += nodepodCount[node]
}
return podsEvicted
}
// ListDuplicatePodsOnANode lists duplicate pods on a given node.
func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) DuplicatePodsMap {
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
if err != nil {
return nil
}
return FindDuplicatePods(pods)
}
// FindDuplicatePods takes a list of pods and returns a duplicatePodsMap.
func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
dpm := DuplicatePodsMap{}
// Errors are already handled in ListDuplicatePodsOnANode, which calls ListEvictablePodsOnNode and checks them.
for _, pod := range pods {
ownerRefList := podutil.OwnerRef(pod)
for _, ownerRef := range ownerRefList {
// Namespace/Kind/Name should be unique for the cluster.
s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name}, "/")
dpm[s] = append(dpm[s], pod)
}
}
return dpm
}
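// Worked example (illustrative, not in the original file): two pods owned by
// the same ReplicaSet "frontend" in namespace "dev" map to one key,
//
//	"dev/ReplicaSet/frontend" -> [pod-a, pod-b]
//
// so deleteDuplicatePods keeps pod-a and evicts pod-b. The names here are
// placeholders.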

View File

@@ -1,146 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
func TestFindDuplicatePods(t *testing.T) {
// first setup pods
node := test.BuildTestNode("n1", 2000, 3000, 10)
p1 := test.BuildTestPod("p1", 100, 0, node.Name)
p1.Namespace = "dev"
p2 := test.BuildTestPod("p2", 100, 0, node.Name)
p2.Namespace = "dev"
p3 := test.BuildTestPod("p3", 100, 0, node.Name)
p3.Namespace = "dev"
p4 := test.BuildTestPod("p4", 100, 0, node.Name)
p5 := test.BuildTestPod("p5", 100, 0, node.Name)
p6 := test.BuildTestPod("p6", 100, 0, node.Name)
p7 := test.BuildTestPod("p7", 100, 0, node.Name)
p7.Namespace = "kube-system"
p8 := test.BuildTestPod("p8", 100, 0, node.Name)
p8.Namespace = "test"
p9 := test.BuildTestPod("p9", 100, 0, node.Name)
p9.Namespace = "test"
p10 := test.BuildTestPod("p10", 100, 0, node.Name)
p10.Namespace = "test"
// ### Evictable Pods ###
// Three Pods in the "default" Namespace, bound to same ReplicaSet. 2 should be evicted.
ownerRef1 := test.GetReplicaSetOwnerRefList()
p1.ObjectMeta.OwnerReferences = ownerRef1
p2.ObjectMeta.OwnerReferences = ownerRef1
p3.ObjectMeta.OwnerReferences = ownerRef1
// Three Pods in the "test" Namespace, bound to same ReplicaSet. 2 should be evicted.
ownerRef2 := test.GetReplicaSetOwnerRefList()
p8.ObjectMeta.OwnerReferences = ownerRef2
p9.ObjectMeta.OwnerReferences = ownerRef2
p10.ObjectMeta.OwnerReferences = ownerRef2
// ### Non-evictable Pods ###
// A DaemonSet.
p4.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A Pod with local storage.
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p5.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p6.Annotations = test.GetMirrorPodAnnotation()
// A Critical Pod.
priority := utils.SystemCriticalPriority
p7.Spec.Priority = &priority
testCases := []struct {
description string
maxPodsToEvict int
pods []v1.Pod
expectedEvictedPodCount int
}{
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 2 should be evicted.",
maxPodsToEvict: 5,
pods: []v1.Pod{*p1, *p2, *p3},
expectedEvictedPodCount: 2,
},
{
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 2 should be evicted.",
maxPodsToEvict: 5,
pods: []v1.Pod{*p8, *p9, *p10},
expectedEvictedPodCount: 2,
},
{
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
maxPodsToEvict: 5,
pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
expectedEvictedPodCount: 4,
},
{
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
maxPodsToEvict: 2,
pods: []v1.Pod{*p4, *p5, *p6, *p7},
expectedEvictedPodCount: 0,
},
{
description: "Test all Pods: 4 should be evicted.",
maxPodsToEvict: 5,
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
expectedEvictedPodCount: 4,
},
}
for _, testCase := range testCases {
npe := utils.NodePodEvictedCount{}
npe[node] = 0
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: testCase.pods}, nil
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node, nil
})
podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, testCase.maxPodsToEvict, false)
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
}
}
}

View File

@@ -1,401 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"sort"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
type NodeUsageMap struct {
node *v1.Node
usage api.ResourceThresholds
allPods []*v1.Pod
nonRemovablePods []*v1.Pod
bePods []*v1.Pod
bPods []*v1.Pod
gPods []*v1.Pod
}
type NodePodsMap map[*v1.Node][]*v1.Pod
func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount) {
if !strategy.Enabled {
return
}
// TODO: Move to config validation?
// TODO: Maybe create a struct for the strategy as well, so that we don't have to pass along all the params?
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
if !validateThresholds(thresholds) {
return
}
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
if !validateTargetThresholds(targetThresholds) {
return
}
npm := createNodePodsMap(ds.Client, nodes)
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, ds.EvictLocalStoragePods)
klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
if len(lowNodes) == 0 {
klog.V(1).Infof("No node is underutilized, nothing to do here, you might tune your thresholds further")
return
}
klog.V(1).Infof("Total number of underutilized nodes: %v", len(lowNodes))
if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
klog.V(1).Infof("number of nodes underutilized (%v) is less than NumberOfNodes (%v), nothing to do here", len(lowNodes), strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
return
}
if len(lowNodes) == len(nodes) {
klog.V(1).Infof("all nodes are underutilized, nothing to do here")
return
}
if len(targetNodes) == 0 {
klog.V(1).Infof("all nodes are under target utilization, nothing to do here")
return
}
klog.V(1).Infof("Criteria for a node above target utilization: CPU: %v, Mem: %v, Pods: %v",
targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun, ds.MaxNoOfPodsToEvictPerNode, nodepodCount)
klog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)
}
func validateThresholds(thresholds api.ResourceThresholds) bool {
if len(thresholds) == 0 { // a nil map also has length 0
klog.V(1).Infof("no resource threshold is configured")
return false
}
for name := range thresholds {
switch name {
case v1.ResourceCPU:
continue
case v1.ResourceMemory:
continue
case v1.ResourcePods:
continue
default:
klog.Errorf("only cpu, memory, or pods thresholds can be specified")
return false
}
}
return true
}
// This function could be merged into validateThresholds once the validation semantics are settled.
func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
if targetThresholds == nil {
klog.V(1).Infof("no target resource threshold is configured")
return false
} else if _, ok := targetThresholds[v1.ResourcePods]; !ok {
klog.V(1).Infof("no target resource threshold for pods is configured")
return false
}
return true
}
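// Illustrative only (not in the original file): a thresholds pair that
// passes both validations above. Nodes below 20% CPU and pods usage count
// as underutilized; nodes above 50% on either resource become eviction
// targets. The exact numbers are arbitrary examples.
func sketchThresholds() (api.ResourceThresholds, api.ResourceThresholds) {
	thresholds := api.ResourceThresholds{
		v1.ResourceCPU:  20,
		v1.ResourcePods: 20,
	}
	targetThresholds := api.ResourceThresholds{
		v1.ResourceCPU:  50,
		v1.ResourcePods: 50, // validateTargetThresholds requires a pods entry
	}
	return thresholds, targetThresholds
}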
// classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
// low and high thresholds, it is simply ignored.
func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds, evictLocalStoragePods bool) ([]NodeUsageMap, []NodeUsageMap) {
lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
for node, pods := range npm {
usage, allPods, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods, evictLocalStoragePods)
nuMap := NodeUsageMap{node, usage, allPods, nonRemovablePods, bePods, bPods, gPods}
// Check if node is underutilized and if we can schedule pods on it.
if !nodeutil.IsNodeUnschedulable(node) && IsNodeWithLowUtilization(usage, thresholds) {
klog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
lowNodes = append(lowNodes, nuMap)
} else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
klog.V(2).Infof("Node %#v is over utilized with usage: %#v", node.Name, usage)
targetNodes = append(targetNodes, nuMap)
} else {
klog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)
}
klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bePods:%v, bPods:%v, gPods:%v", len(allPods), len(nonRemovablePods), len(bePods), len(bPods), len(gPods))
}
return lowNodes, targetNodes
}
// evictPodsFromTargetNodes evicts pods based on priority when all pods on a node have a priority set;
// otherwise it falls back to evicting them by QoS class.
// TODO: @ravig Break this function into smaller functions.
func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount utils.NodePodEvictedCount) int {
podsEvicted := 0
SortNodesByUsage(targetNodes)
// upper bound on total number of pods/cpu/memory to be moved
var totalPods, totalCPU, totalMem float64
for _, node := range lowNodes {
nodeCapacity := node.node.Status.Capacity
if len(node.node.Status.Allocatable) > 0 {
nodeCapacity = node.node.Status.Allocatable
}
// totalPods to be moved
podsPercentage := targetThresholds[v1.ResourcePods] - node.usage[v1.ResourcePods]
totalPods += ((float64(podsPercentage) * float64(nodeCapacity.Pods().Value())) / 100)
// totalCPU capacity to be moved
if _, ok := targetThresholds[v1.ResourceCPU]; ok {
cpuPercentage := targetThresholds[v1.ResourceCPU] - node.usage[v1.ResourceCPU]
totalCPU += ((float64(cpuPercentage) * float64(nodeCapacity.Cpu().MilliValue())) / 100)
}
// totalMem capacity to be moved
if _, ok := targetThresholds[v1.ResourceMemory]; ok {
memPercentage := targetThresholds[v1.ResourceMemory] - node.usage[v1.ResourceMemory]
totalMem += ((float64(memPercentage) * float64(nodeCapacity.Memory().Value())) / 100)
}
}
klog.V(1).Infof("Total capacity to be moved: CPU:%v, Mem:%v, Pods:%v", totalCPU, totalMem, totalPods)
klog.V(1).Infof("********Number of pods evicted from each node:***********")
for _, node := range targetNodes {
nodeCapacity := node.node.Status.Capacity
if len(node.node.Status.Allocatable) > 0 {
nodeCapacity = node.node.Status.Allocatable
}
klog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
currentPodsEvicted := nodepodCount[node.node]
// If the first pod has a priority set, assume all pods do and evict based on priority.
if len(node.allPods) > 0 && node.allPods[0].Spec.Priority != nil {
klog.V(1).Infof("All pods have priority associated with them. Evicting pods based on priority")
evictablePods := make([]*v1.Pod, 0)
evictablePods = append(append(node.bPods, node.bePods...), node.gPods...)
// Sort the evictable pods by priority; pods with equal priority are further ordered by QoS tier.
sortPodsBasedOnPriority(evictablePods)
evictPods(evictablePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
} else {
// TODO: Remove this when we support only priority.
// Falling back to evicting pods based on QoS class.
klog.V(1).Infof("Evicting pods based on QoS")
klog.V(1).Infof("There are %v non-evictable pods on the node", len(node.nonRemovablePods))
// evict best effort pods
evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
// evict burstable pods
evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
// evict guaranteed pods
evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
}
nodepodCount[node.node] = currentPodsEvicted
podsEvicted = podsEvicted + nodepodCount[node.node]
klog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
}
return podsEvicted
}
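// Worked example (illustrative): with targetThresholds[pods] = 50, a low
// node at 30% pod usage and an allocatable capacity of 10 pods contributes
// (50-30) * 10 / 100 = 2 to totalPods -- roughly two pods can move there
// before it reaches the target threshold. The CPU and memory sums work the
// same way over milli-CPU and bytes.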
func evictPods(inputPods []*v1.Pod,
client clientset.Interface,
evictionPolicyGroupVersion string,
targetThresholds api.ResourceThresholds,
nodeCapacity v1.ResourceList,
nodeUsage api.ResourceThresholds,
totalPods *float64,
totalCPU *float64,
totalMem *float64,
podsEvicted *int,
dryRun bool, maxPodsToEvict int) {
if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCPU > 0 || *totalMem > 0) {
onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
for _, pod := range inputPods {
if maxPodsToEvict > 0 && *podsEvicted+1 > maxPodsToEvict {
break
}
cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)
success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
if !success {
klog.Warningf("Error when evicting pod: %#v (%#v)", pod.Name, err)
} else {
klog.V(3).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
// update remaining pods
*podsEvicted++
nodeUsage[v1.ResourcePods] -= onePodPercentage
*totalPods--
// update remaining cpu
*totalCPU -= float64(cUsage)
nodeUsage[v1.ResourceCPU] -= api.Percentage((float64(cUsage) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
// update remaining memory
*totalMem -= float64(mUsage)
nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)
klog.V(3).Infof("updated node usage: %#v", nodeUsage)
// check if node utilization drops below target threshold or required capacity (cpu, memory, pods) is moved
if !IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) || (*totalPods <= 0 && *totalCPU <= 0 && *totalMem <= 0) {
break
}
}
}
}
}
func SortNodesByUsage(nodes []NodeUsageMap) {
sort.Slice(nodes, func(i, j int) bool {
var ti, tj api.Percentage
for name, value := range nodes[i].usage {
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
ti += value
}
}
for name, value := range nodes[j].usage {
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
tj += value
}
}
// Sort in descending order of total usage.
return ti > tj
})
}
// sortPodsBasedOnPriority sorts pods based on priority and if their priorities are equal, they are sorted based on QoS tiers.
func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
sort.Slice(evictablePods, func(i, j int) bool {
if evictablePods[i].Spec.Priority == nil && evictablePods[j].Spec.Priority != nil {
return true
}
if evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority != nil {
return false
}
if (evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority == nil) || (*evictablePods[i].Spec.Priority == *evictablePods[j].Spec.Priority) {
if podutil.IsBestEffortPod(evictablePods[i]) {
return true
}
if podutil.IsBurstablePod(evictablePods[i]) && podutil.IsGuaranteedPod(evictablePods[j]) {
return true
}
return false
}
return *evictablePods[i].Spec.Priority < *evictablePods[j].Spec.Priority
})
}
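// For illustration: after sortPodsBasedOnPriority the slice is in ascending
// eviction order -- nil-priority pods first, then ascending priority, with
// ties broken BestEffort before Burstable before Guaranteed -- so callers
// that evict from the front remove the cheapest pods first.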
// createNodePodsMap builds a NodePodsMap containing all pods on each node.
func createNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap {
npm := NodePodsMap{}
for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(client, node)
if err != nil {
klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
} else {
npm[node] = pods
}
}
return npm
}
func IsNodeAboveTargetUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
for name, nodeValue := range nodeThresholds {
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
if value, ok := thresholds[name]; !ok {
continue
} else if nodeValue > value {
return true
}
}
}
return false
}
func IsNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
for name, nodeValue := range nodeThresholds {
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
if value, ok := thresholds[name]; !ok {
continue
} else if nodeValue > value {
return false
}
}
}
return true
}
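// Worked example (illustrative): with thresholds {cpu: 30, pods: 30}, a node
// at {cpu: 25, pods: 28} is low-utilization because no tracked resource
// exceeds its threshold, while with targetThresholds {cpu: 50, pods: 50} a
// node at {cpu: 60, pods: 40} is above target because a single resource
// (cpu) over its threshold is enough.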
// NodeUtilization returns the node's usage as resource percentages, along with its pods classified as all, non-removable, best-effort, burstable, and guaranteed.
func NodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool) (api.ResourceThresholds, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
bePods := []*v1.Pod{}
nonRemovablePods := []*v1.Pod{}
bPods := []*v1.Pod{}
gPods := []*v1.Pod{}
totalReqs := map[v1.ResourceName]resource.Quantity{}
for _, pod := range pods {
// Non-removable pods still count toward usage unless they are best effort (which request nothing), so podutil.ListEvictablePodsOnNode cannot be used here.
if !podutil.IsEvictable(pod, evictLocalStoragePods) {
nonRemovablePods = append(nonRemovablePods, pod)
if podutil.IsBestEffortPod(pod) {
continue
}
} else if podutil.IsBestEffortPod(pod) {
bePods = append(bePods, pod)
continue
} else if podutil.IsBurstablePod(pod) {
bPods = append(bPods, pod)
} else {
gPods = append(gPods, pod)
}
req, _ := utils.PodRequestsAndLimits(pod)
for name, quantity := range req {
if name == v1.ResourceCPU || name == v1.ResourceMemory {
if value, ok := totalReqs[name]; !ok {
totalReqs[name] = quantity.DeepCopy()
} else {
value.Add(quantity)
totalReqs[name] = value
}
}
}
}
nodeCapacity := node.Status.Capacity
if len(node.Status.Allocatable) > 0 {
nodeCapacity = node.Status.Allocatable
}
usage := api.ResourceThresholds{}
totalCPUReq := totalReqs[v1.ResourceCPU]
totalMemReq := totalReqs[v1.ResourceMemory]
totalPods := len(pods)
usage[v1.ResourceCPU] = api.Percentage((float64(totalCPUReq.MilliValue()) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
usage[v1.ResourceMemory] = api.Percentage(float64(totalMemReq.Value()) / float64(nodeCapacity.Memory().Value()) * 100)
usage[v1.ResourcePods] = api.Percentage((float64(totalPods) * 100) / float64(nodeCapacity.Pods().Value()))
return usage, pods, nonRemovablePods, bePods, bPods, gPods
}
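// Worked example (illustrative): on a node with 2000m allocatable CPU and
// room for 10 pods, three pods requesting 400m each yield
// usage[cpu] = 1200 * 100 / 2000 = 60% and usage[pods] = 3 * 100 / 10 = 30%.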

View File

@@ -1,329 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"fmt"
"strings"
"testing"
"reflect"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
// TODO: Make this table driven.
func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
var thresholds = make(api.ResourceThresholds)
var targetThresholds = make(api.ResourceThresholds)
thresholds[v1.ResourceCPU] = 30
thresholds[v1.ResourcePods] = 30
targetThresholds[v1.ResourceCPU] = 50
targetThresholds[v1.ResourcePods] = 50
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
// Make node n3 unschedulable so that it won't be counted in the low-utilization nodes list.
n3.Spec.Unschedulable = true
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
// These won't be evicted.
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
// The following 3 pods won't get evicted.
// A daemonset.
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p7.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p7.Annotations = test.GetMirrorPodAnnotation()
// A Critical Pod.
p8.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
p8.Spec.Priority = &priority
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, "n1") {
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
}
if strings.Contains(fieldString, "n2") {
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
}
if strings.Contains(fieldString, "n3") {
return true, &v1.PodList{Items: []v1.Pod{}}, nil
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
switch getAction.GetName() {
case n1.Name:
return true, n1, nil
case n2.Name:
return true, n2, nil
case n3.Name:
return true, n3, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
expectedPodsEvicted := 3
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
}
npe := utils.NodePodEvictedCount{}
npe[n1] = 0
npe[n2] = 0
npe[n3] = 0
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
if expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
}
}
// TODO: Make this table driven.
func TestLowNodeUtilizationWithPriorities(t *testing.T) {
var thresholds = make(api.ResourceThresholds)
var targetThresholds = make(api.ResourceThresholds)
thresholds[v1.ResourceCPU] = 30
thresholds[v1.ResourcePods] = 30
targetThresholds[v1.ResourceCPU] = 50
targetThresholds[v1.ResourcePods] = 50
lowPriority := int32(0)
highPriority := int32(10000)
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
// Make node n3 unschedulable so that it won't be counted in the low-utilization nodes list.
n3.Spec.Unschedulable = true
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
p1.Spec.Priority = &highPriority
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
p2.Spec.Priority = &highPriority
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
p3.Spec.Priority = &highPriority
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
p4.Spec.Priority = &highPriority
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
p5.Spec.Priority = &lowPriority
// These won't be evicted.
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
p6.Spec.Priority = &highPriority
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
p7.Spec.Priority = &lowPriority
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
p8.Spec.Priority = &lowPriority
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
// The following 3 pods won't get evicted.
// A daemonset.
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p7.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p7.Annotations = test.GetMirrorPodAnnotation()
// A Critical Pod.
p8.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
p8.Spec.Priority = &priority
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, "n1") {
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
}
if strings.Contains(fieldString, "n2") {
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
}
if strings.Contains(fieldString, "n3") {
return true, &v1.PodList{Items: []v1.Pod{}}, nil
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
switch getAction.GetName() {
case n1.Name:
return true, n1, nil
case n2.Name:
return true, n2, nil
case n3.Name:
return true, n3, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
expectedPodsEvicted := 3
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
}
npe := utils.NodePodEvictedCount{}
npe[n1] = 0
npe[n2] = 0
npe[n3] = 0
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
if expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
}
}
func TestSortPodsByPriority(t *testing.T) {
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
lowPriority := int32(0)
highPriority := int32(10000)
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
p1.Spec.Priority = &lowPriority
// BestEffort
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
p2.Spec.Priority = &highPriority
p2.Spec.Containers[0].Resources.Requests = nil
p2.Spec.Containers[0].Resources.Limits = nil
// Burstable
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
p3.Spec.Priority = &highPriority
// Guaranteed
p4 := test.BuildTestPod("p4", 400, 100, n1.Name)
p4.Spec.Priority = &highPriority
p4.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
p4.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
// Burstable and Guaranteed pods with nil priorities.
p5 := test.BuildTestPod("p5", 400, 100, n1.Name)
p5.Spec.Priority = nil
p6 := test.BuildTestPod("p6", 400, 100, n1.Name)
p6.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
p6.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
p6.Spec.Priority = nil
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
sortPodsBasedOnPriority(podList)
if !reflect.DeepEqual(podList[len(podList)-1], p4) {
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
}
}
func TestValidateThresholds(t *testing.T) {
tests := []struct {
name string
input api.ResourceThresholds
succeed bool
}{
{
name: "passing nil map for threshold",
input: nil,
succeed: false,
},
{
name: "passing no threshold",
input: api.ResourceThresholds{},
succeed: false,
},
{
name: "passing unsupported resource name",
input: api.ResourceThresholds{
v1.ResourceCPU: 40,
v1.ResourceStorage: 25.5,
},
succeed: false,
},
{
name: "passing invalid resource name",
input: api.ResourceThresholds{
v1.ResourceCPU: 40,
"coolResource": 42.0,
},
succeed: false,
},
{
name: "passing a valid threshold with cpu, memory and pods",
input: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 30,
v1.ResourcePods: 40,
},
succeed: true,
},
}
for _, tc := range tests {
isValid := validateThresholds(tc.input)
if isValid != tc.succeed {
t.Errorf("expected validity of threshold: %#v\nto be %v but got %v instead", tc.input, tc.succeed, isValid)
}
}
}

View File

@@ -1,76 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"k8s.io/api/core/v1"
"k8s.io/klog"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)
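// RemovePodsViolatingNodeAffinity evicts pods that no longer satisfy their
// required node affinity, provided another node could host them.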
func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
}
func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
evictedPodCount := 0
if !strategy.Enabled {
return evictedPodCount
}
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)
switch nodeAffinity {
case "requiredDuringSchedulingIgnoredDuringExecution":
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
if err != nil {
klog.Errorf("failed to get pods from %v: %v", node.Name, err)
}
for _, pod := range pods {
if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
break
}
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
klog.V(1).Infof("Evicting pod: %v", pod.Name)
evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, ds.DryRun)
nodepodCount[node]++
}
}
}
evictedPodCount += nodepodCount[node]
}
default:
klog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
return evictedPodCount
}
}
klog.V(1).Infof("Evicted %v pods", evictedPodCount)
return evictedPodCount
}
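// For illustration: a pod whose requiredDuringSchedulingIgnoredDuringExecution
// node affinity no longer matches its node's labels is evicted only when
// nodeutil.PodFitsAnyNode finds some other node that could host it; otherwise
// eviction would just leave the pod pending.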

View File

@@ -1,185 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
Enabled: true,
Params: api.StrategyParameters{
NodeAffinityType: []string{
"requiredDuringSchedulingIgnoredDuringExecution",
},
},
}
nodeLabelKey := "kubernetes.io/desiredNode"
nodeLabelValue := "yes"
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10)
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10)
unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10)
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
unschedulableNodeWithLabels.Spec.Unschedulable = true
addPodsToNode := func(node *v1.Node) []v1.Pod {
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name)
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
}
pod1 := test.BuildTestPod("pod1", 100, 0, node.Name)
pod2 := test.BuildTestPod("pod2", 100, 0, node.Name)
podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
return []v1.Pod{
*podWithNodeAffinity,
*pod1,
*pod2,
}
}
tests := []struct {
description string
nodes []*v1.Node
pods []v1.Pod
strategy api.DeschedulerStrategy
expectedEvictedPodCount int
npe utils.NodePodEvictedCount
maxPodsToEvict int
}{
{
description: "Strategy disabled, should not evict any pods",
strategy: api.DeschedulerStrategy{
Enabled: false,
Params: api.StrategyParameters{
NodeAffinityType: []string{
"requiredDuringSchedulingIgnoredDuringExecution",
},
},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
description: "Invalid strategy type, should not evict any pods",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: api.StrategyParameters{
NodeAffinityType: []string{
"requiredDuringSchedulingRequiredDuringExecution",
},
},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
description: "Pod is correctly scheduled on node, no eviction expected",
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels),
nodes: []*v1.Node{nodeWithLabels},
npe: utils.NodePodEvictedCount{nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 1, should not be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 1,
},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
expectedEvictedPodCount: 0,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
maxPodsToEvict: 0,
},
}
for _, tc := range tests {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil
})
ds := options.DeschedulerServer{
Client: fakeClient,
}
actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict, false)
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
}
}

View File

@@ -1,139 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
)
const (
TolerationOpExists v1.TolerationOperator = "Exists"
TolerationOpEqual v1.TolerationOperator = "Equal"
)
// RemovePodsViolatingNodeTaints evicts pods that do not tolerate the NoSchedule taints on their node.
func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
if !strategy.Enabled {
return
}
deletePodsViolatingNodeTaints(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
}
// deletePodsViolatingNodeTaints evicts pods that no longer tolerate their node's NoSchedule taints and returns the count of evicted pods.
func deletePodsViolatingNodeTaints(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
podsEvicted := 0
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
if err != nil {
// no pods evicted, as an error was encountered while retrieving evictable pods
return 0
}
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
if maxPodsToEvict > 0 && nodePodCount[node]+1 > maxPodsToEvict {
break
}
if !checkPodsSatisfyTolerations(pods[i], node) {
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
klog.Errorf("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
} else {
nodePodCount[node]++
klog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
}
}
}
podsEvicted += nodePodCount[node]
}
return podsEvicted
}
// checkPodsSatisfyTolerations checks whether the pod's tolerations still satisfy the node's NoSchedule taints.
func checkPodsSatisfyTolerations(pod *v1.Pod, node *v1.Node) bool {
tolerations := pod.Spec.Tolerations
taints := node.Spec.Taints
if len(taints) == 0 {
return true
}
noScheduleTaints := getNoScheduleTaints(taints)
if !allTaintsTolerated(noScheduleTaints, tolerations) {
klog.V(2).Infof("Not all taints are tolerated after update for Pod %v on node %v", pod.Name, node.Name)
return false
}
return true
}
// getNoScheduleTaints returns the NoSchedule taints from the slice of taints it receives.
func getNoScheduleTaints(taints []v1.Taint) []v1.Taint {
result := []v1.Taint{}
for i := range taints {
if taints[i].Effect == v1.TaintEffectNoSchedule {
result = append(result, taints[i])
}
}
return result
}
// toleratesTaint returns true if the toleration tolerates the taint, or false otherwise.
func toleratesTaint(toleration *v1.Toleration, taint *v1.Taint) bool {
if (len(toleration.Key) > 0 && toleration.Key != taint.Key) ||
(len(toleration.Effect) > 0 && toleration.Effect != taint.Effect) {
return false
}
switch toleration.Operator {
// empty operator means Equal
case "", TolerationOpEqual:
return toleration.Value == taint.Value
case TolerationOpExists:
return true
default:
return false
}
}
// allTaintsTolerated returns true if every taint is tolerated by at least one of the tolerations.
func allTaintsTolerated(taints []v1.Taint, tolerations []v1.Toleration) bool {
if len(taints) == 0 {
return true
}
if len(tolerations) == 0 { // taints is non-empty here, so nothing can be tolerated
return false
}
for i := range taints {
tolerated := false
for j := range tolerations {
if toleratesTaint(&tolerations[j], &taints[i]) {
tolerated = true
break
}
}
if !tolerated {
return false
}
}
return true
}
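// Illustrative check (not part of the original file): an Exists toleration
// matches any value of its key, while an Equal or empty-operator toleration
// must match the taint's value exactly.
func sketchToleratesTaint() bool {
	taint := v1.Taint{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}
	toleration := v1.Toleration{Key: "dedicated", Operator: TolerationOpExists, Effect: v1.TaintEffectNoSchedule}
	// true: with Exists, only the key and effect have to match.
	return toleratesTaint(&toleration, &taint)
}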

View File

@@ -1,298 +0,0 @@
package strategies
import (
"fmt"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
// createNoScheduleTaint builds a NoSchedule taint whose key and value are suffixed with index.
func createNoScheduleTaint(key, value string, index int) v1.Taint {
return v1.Taint{
Key: fmt.Sprintf("%s%v", key, index),
Value: fmt.Sprintf("%s%v", value, index),
Effect: v1.TaintEffectNoSchedule,
}
}
func addTaintsToNode(node *v1.Node, key, value string, indices []int) *v1.Node {
taints := []v1.Taint{}
for _, index := range indices {
taints = append(taints, createNoScheduleTaint(key, value, index))
}
node.Spec.Taints = taints
return node
}
func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
if pod.Annotations == nil {
pod.Annotations = map[string]string{}
}
pod.Spec.Tolerations = []v1.Toleration{{Key: key + fmt.Sprintf("%v", index), Value: value + fmt.Sprintf("%v", index), Effect: v1.TaintEffectNoSchedule}}
return pod
}
func TestDeletePodsViolatingNodeTaints(t *testing.T) {
node1 := test.BuildTestNode("n1", 2000, 3000, 10)
node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
node2 := test.BuildTestNode("n2", 2000, 3000, 10)
node2 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
p1 := test.BuildTestPod("p1", 100, 0, node1.Name)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name)
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p7 := test.BuildTestPod("p7", 100, 0, node2.Name)
p8 := test.BuildTestPod("p8", 100, 0, node2.Name)
p9 := test.BuildTestPod("p9", 100, 0, node2.Name)
p10 := test.BuildTestPod("p10", 100, 0, node2.Name)
p11 := test.BuildTestPod("p11", 100, 0, node2.Name)
p11.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// The following 4 pods won't get evicted.
// A Critical Pod.
p7.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
p7.Spec.Priority = &priority
// A daemonset.
p8.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p9.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p9.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p10.Annotations = test.GetMirrorPodAnnotation()
p1 = addTolerationToPod(p1, "testTaint", "test", 1)
p3 = addTolerationToPod(p3, "testTaint", "test", 1)
p4 = addTolerationToPod(p4, "testTaintX", "testX", 1)
tests := []struct {
description string
nodes []*v1.Node
pods []v1.Pod
evictLocalStoragePods bool
npe utils.NodePodEvictedCount
maxPodsToEvict int
expectedEvictedPodCount int
}{
{
description: "Pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
npe: utils.NodePodEvictedCount{node1: 0},
maxPodsToEvict: 0,
expectedEvictedPodCount: 1, //p2 gets evicted
},
{
description: "Pods with tolerations but not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p3, *p4},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
npe: utils.NodePodEvictedCount{node1: 0},
maxPodsToEvict: 0,
expectedEvictedPodCount: 1, //p4 gets evicted
},
{
description: "Only <maxPodsToEvict> number of Pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p5, *p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
npe: utils.NodePodEvictedCount{node1: 0},
maxPodsToEvict: 1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{
description: "Critical pods not tolerating node taint should not be evicted",
pods: []v1.Pod{*p7, *p8, *p9, *p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
npe: utils.NodePodEvictedCount{node2: 0},
maxPodsToEvict: 0,
expectedEvictedPodCount: 0,
},
{
description: "Critical pods except storage pods not tolerating node taint should not be evicted",
pods: []v1.Pod{*p7, *p8, *p9, *p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: true,
npe: utils.NodePodEvictedCount{node2: 0},
maxPodsToEvict: 0,
expectedEvictedPodCount: 1,
},
{
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p7, *p8, *p10, *p11},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
npe: utils.NodePodEvictedCount{node2: 0},
maxPodsToEvict: 0,
expectedEvictedPodCount: 1,
},
}
for _, tc := range tests {
// create fake client
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil
})
actualEvictedPodCount := deletePodsViolatingNodeTaints(fakeClient, "v1", tc.nodes, false, tc.npe, tc.maxPodsToEvict, tc.evictLocalStoragePods)
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
}
}
func TestToleratesTaint(t *testing.T) {
testCases := []struct {
description string
toleration v1.Toleration
taint v1.Taint
expectTolerated bool
}{
{
description: "toleration and taint have the same key and effect, and operator is Exists, and taint has no value, expect tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same key and effect, and operator is Exists, and taint has some value, expect tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same effect, toleration has empty key and operator is Exists, means match all taints, expect tolerated",
toleration: v1.Toleration{
Key: "",
Operator: TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same key, effect and value, and operator is Equal, expect tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: TolerationOpEqual,
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same key and effect, but different values, and operator is Equal, expect not tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: TolerationOpEqual,
Value: "value1",
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "value2",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: false,
},
{
description: "toleration and taint have the same key and value, but different effects, and operator is Equal, expect not tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: TolerationOpEqual,
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoExecute,
},
expectTolerated: false,
},
}
for _, tc := range testCases {
if tolerated := toleratesTaint(&tc.toleration, &tc.taint); tc.expectTolerated != tolerated {
t.Errorf("[%s] expect %v, got %v: toleration %+v, taint %s", tc.description, tc.expectTolerated, tolerated, tc.toleration, tc.taint.ToString())
}
}
}
func TestFilterNoExecuteTaints(t *testing.T) {
taints := []v1.Taint{
{
Key: "one",
Value: "one",
Effect: v1.TaintEffectNoExecute,
},
{
Key: "two",
Value: "two",
Effect: v1.TaintEffectNoSchedule,
},
}
taints = getNoScheduleTaints(taints)
if len(taints) != 1 || taints[0].Key != "two" {
t.Errorf("Filtering doesn't work. Got %v", taints)
}
}


@@ -1,104 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
)
// RemovePodsViolatingInterPodAntiAffinity with elimination strategy
func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
if !strategy.Enabled {
return
}
removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
}
// removePodsWithAffinityRules evicts pods on the node whose inter-pod anti-affinity rules are violated by other pods on the node.
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
podsEvicted := 0
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
if err != nil {
return 0
}
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
if maxPodsToEvict > 0 && nodePodCount[node]+1 > maxPodsToEvict {
break
}
if checkPodsWithAntiAffinityExist(pods[i], pods) {
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
klog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
} else {
nodePodCount[node]++
klog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err)
// Since the current pod is evicted all other pods which have anti-affinity with this
// pod need not be evicted.
// Update pods.
pods = append(pods[:i], pods[i+1:]...)
i--
totalPods--
}
}
}
podsEvicted += nodePodCount[node]
}
return podsEvicted
}
// checkPodsWithAntiAffinityExist checks whether any other pod on the node matches the given pod's anti-affinity terms.
func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
affinity := pod.Spec.Affinity
if affinity != nil && affinity.PodAntiAffinity != nil {
for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) {
namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
klog.Infof("%v", err)
return false
}
for _, existingPod := range pods {
if existingPod.Name != pod.Name && utils.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
return true
}
}
}
}
return false
}
// getPodAntiAffinityTerms gets the antiaffinity terms for the given pod.
func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
if podAntiAffinity != nil {
if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
}
}
return terms
}
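
As a hedged illustration of what this file evaluates (pod names and labels invented): p1 declares required anti-affinity against label foo=bar, and p2 carries that label in the same namespace, so the check reports a violation.

package strategies

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ExampleCheckPodsWithAntiAffinityExist() {
	p1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	p1.Spec.Affinity = &v1.Affinity{
		PodAntiAffinity: &v1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
				LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
				TopologyKey:   "kubernetes.io/hostname",
			}},
		},
	}
	// p2 matches p1's term: same namespace (the term's empty Namespaces list
	// defaults to p1's namespace) and the foo=bar label.
	p2 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "p2", Namespace: "default", Labels: map[string]string{"foo": "bar"},
	}}

	fmt.Println(checkPodsWithAntiAffinityExist(p1, []*v1.Pod{p1, p2})) // true
}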


@@ -1,90 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
func TestPodAntiAffinity(t *testing.T) {
node := test.BuildTestNode("n1", 2000, 3000, 10)
p1 := test.BuildTestPod("p1", 100, 0, node.Name)
p2 := test.BuildTestPod("p2", 100, 0, node.Name)
p3 := test.BuildTestPod("p3", 100, 0, node.Name)
p4 := test.BuildTestPod("p4", 100, 0, node.Name)
p2.Labels = map[string]string{"foo": "bar"}
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// set pod anti affinity
setPodAntiAffinity(p1)
setPodAntiAffinity(p3)
setPodAntiAffinity(p4)
// create fake client
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node, nil
})
npe := utils.NodePodEvictedCount{}
npe[node] = 0
expectedEvictedPodCount := 3
podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0, false)
if podsEvicted != expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
}
npe[node] = 0
expectedEvictedPodCount = 1
podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1, false)
if podsEvicted != expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
}
}
func setPodAntiAffinity(inputPod *v1.Pod) {
inputPod.Spec.Affinity = &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"bar"},
},
},
},
TopologyKey: "region",
},
},
},
}
}


@@ -1,181 +0,0 @@
package utils
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/featuregate"
)
const (
// owner: @jinxu
// beta: v1.10
//
// New local storage types to support local storage capacity isolation
LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
// owner: @egernst
// alpha: v1.16
//
// Enables PodOverhead, for accounting pod overheads which are specific to a given RuntimeClass
PodOverhead featuregate.Feature = "PodOverhead"
)
// GetResourceRequest finds and returns the request value for a specific resource.
func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
if resource == v1.ResourcePods {
return 1
}
requestQuantity := GetResourceRequestQuantity(pod, resource)
if resource == v1.ResourceCPU {
return requestQuantity.MilliValue()
}
return requestQuantity.Value()
}
// GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
requestQuantity := resource.Quantity{}
switch resourceName {
case v1.ResourceCPU:
requestQuantity = resource.Quantity{Format: resource.DecimalSI}
case v1.ResourceMemory, v1.ResourceStorage, v1.ResourceEphemeralStorage:
requestQuantity = resource.Quantity{Format: resource.BinarySI}
default:
requestQuantity = resource.Quantity{Format: resource.DecimalSI}
}
if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(LocalStorageCapacityIsolation) {
// if the local storage capacity isolation feature gate is disabled, pods request 0 disk
return requestQuantity
}
for _, container := range pod.Spec.Containers {
if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
requestQuantity.Add(rQuantity)
}
}
for _, container := range pod.Spec.InitContainers {
if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
if requestQuantity.Cmp(rQuantity) < 0 {
requestQuantity = rQuantity.DeepCopy()
}
}
}
// if PodOverhead feature is supported, add overhead for running a pod
// to the total requests if the resource total is non-zero
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(PodOverhead) {
if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
requestQuantity.Add(podOverhead)
}
}
return requestQuantity
}
// IsMirrorPod returns true if the passed Pod is a Mirror Pod.
func IsMirrorPod(pod *v1.Pod) bool {
_, ok := pod.Annotations[v1.MirrorPodAnnotationKey]
return ok
}
// IsStaticPod returns true if the pod is a static pod.
func IsStaticPod(pod *v1.Pod) bool {
source, err := GetPodSource(pod)
return err == nil && source != "api"
}
// GetPodSource returns the source of the pod based on the annotation.
func GetPodSource(pod *v1.Pod) (string, error) {
if pod.Annotations != nil {
if source, ok := pod.Annotations["kubernetes.io/config.source"]; ok {
return source, nil
}
}
return "", fmt.Errorf("cannot get source of pod %q", pod.UID)
}
// IsCriticalPod returns true if the pod is a static pod, a mirror pod, or its priority is greater than or equal to SystemCriticalPriority.
func IsCriticalPod(pod *v1.Pod) bool {
if IsStaticPod(pod) {
return true
}
if IsMirrorPod(pod) {
return true
}
if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
return true
}
return false
}
// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
func IsCriticalPodBasedOnPriority(priority int32) bool {
return priority >= SystemCriticalPriority
}
// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
// containers of the pod. If PodOverhead feature is enabled, pod overhead is added to the
// total container resource requests and to the total container limits which have a
// non-zero quantity.
func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
reqs, limits = v1.ResourceList{}, v1.ResourceList{}
for _, container := range pod.Spec.Containers {
addResourceList(reqs, container.Resources.Requests)
addResourceList(limits, container.Resources.Limits)
}
// init containers define the minimum of any resource
for _, container := range pod.Spec.InitContainers {
maxResourceList(reqs, container.Resources.Requests)
maxResourceList(limits, container.Resources.Limits)
}
// if PodOverhead feature is supported, add overhead for running a pod
// to the sum of requests and to non-zero limits:
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(PodOverhead) {
addResourceList(reqs, pod.Spec.Overhead)
for name, quantity := range pod.Spec.Overhead {
if value, ok := limits[name]; ok && !value.IsZero() {
value.Add(quantity)
limits[name] = value
}
}
}
return
}
// addResourceList adds the resources in newList to list
func addResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok {
list[name] = quantity.DeepCopy()
} else {
value.Add(quantity)
list[name] = value
}
}
}
// maxResourceList sets list to the greater of list/newList for every resource
// in either list.
func maxResourceList(list, new v1.ResourceList) {
for name, quantity := range new {
if value, ok := list[name]; !ok || quantity.Cmp(value) > 0 {
list[name] = quantity.DeepCopy()
}
}
}
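
A small sketch of the init-container rule above (quantities invented): the pod-level request is the larger of the summed app containers and any single init container, per resource.

package utils

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func ExamplePodRequestsAndLimits() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{{
				Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceCPU: resource.MustParse("500m"),
				}},
			}},
			Containers: []v1.Container{{
				Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceCPU: resource.MustParse("200m"),
				}},
			}},
		},
	}
	reqs, _ := PodRequestsAndLimits(pod)
	cpu := reqs[v1.ResourceCPU]
	fmt.Println(cpu.String()) // 500m: the init container's request dominates
}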


@@ -1,128 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/klog"
)
// The following code has been copied from the predicates package to avoid
// huge vendoring issues; it is mostly copied from
// k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/.
// Some minor changes have been made to ease the imports, but most of the code
// remains untouched.
// PodMatchNodeSelector checks if a pod node selector matches the node label.
func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) (bool, error) {
if node == nil {
return false, fmt.Errorf("node not found")
}
if podMatchesNodeLabels(pod, node) {
return true, nil
}
return false, nil
}
// The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector.
func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
// Check if node.Labels match pod.Spec.NodeSelector.
if len(pod.Spec.NodeSelector) > 0 {
selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
if !selector.Matches(labels.Set(node.Labels)) {
return false
}
}
// 1. nil NodeSelector matches all nodes (i.e. does not filter out any nodes)
// 2. nil []NodeSelectorTerm (equivalent to non-nil empty NodeSelector) matches no nodes
// 3. zero-length non-nil []NodeSelectorTerm matches no nodes also, just for simplicity
// 4. nil []NodeSelectorRequirement (equivalent to non-nil empty NodeSelectorTerm) matches no nodes
// 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity
// 6. non-nil empty NodeSelectorRequirement is not allowed
affinity := pod.Spec.Affinity
if affinity != nil && affinity.NodeAffinity != nil {
nodeAffinity := affinity.NodeAffinity
// If there are no required NodeAffinity requirements, this is a no-op, i.e. all nodes are selected.
if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
return true
}
// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
return nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
}
}
return true
}
// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
// terms are ORed, and an empty list of terms will match nothing.
func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
for _, req := range nodeSelectorTerms {
nodeSelector, err := NodeSelectorRequirementsAsSelector(req.MatchExpressions)
if err != nil {
klog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
return false
}
if nodeSelector.Matches(labels.Set(node.Labels)) {
return true
}
}
return false
}
// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
// labels.Selector.
func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) {
if len(nsm) == 0 {
return labels.Nothing(), nil
}
selector := labels.NewSelector()
for _, expr := range nsm {
var op selection.Operator
switch expr.Operator {
case v1.NodeSelectorOpIn:
op = selection.In
case v1.NodeSelectorOpNotIn:
op = selection.NotIn
case v1.NodeSelectorOpExists:
op = selection.Exists
case v1.NodeSelectorOpDoesNotExist:
op = selection.DoesNotExist
case v1.NodeSelectorOpGt:
op = selection.GreaterThan
case v1.NodeSelectorOpLt:
op = selection.LessThan
default:
return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
}
r, err := labels.NewRequirement(expr.Key, op, expr.Values)
if err != nil {
return nil, err
}
selector = selector.Add(*r)
}
return selector, nil
}
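
A brief sketch of the conversion (key and values invented): an In requirement becomes a label selector that can be matched against a node's labels.

package utils

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func ExampleNodeSelectorRequirementsAsSelector() {
	reqs := []v1.NodeSelectorRequirement{{
		Key:      "zone",
		Operator: v1.NodeSelectorOpIn,
		Values:   []string{"us-east-1a", "us-east-1b"},
	}}
	selector, err := NodeSelectorRequirementsAsSelector(reqs)
	if err != nil {
		panic(err)
	}
	fmt.Println(selector.Matches(labels.Set{"zone": "us-east-1a"})) // true
	fmt.Println(selector.Matches(labels.Set{"zone": "eu-west-1"}))  // false
}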


@@ -1,35 +0,0 @@
package utils
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
)
const SystemCriticalPriority = 2 * int32(1000000000)
// GetNamespacesFromPodAffinityTerm returns a set of names
// according to the namespaces indicated in podAffinityTerm.
// If namespaces is empty it considers the given pod's namespace.
func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.String {
names := sets.String{}
if len(podAffinityTerm.Namespaces) == 0 {
names.Insert(pod.Namespace)
} else {
names.Insert(podAffinityTerm.Namespaces...)
}
return names
}
// PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
// matches the namespace and selector defined by <affinityPod>'s <term>.
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, selector labels.Selector) bool {
if !namespaces.Has(pod.Namespace) {
return false
}
if !selector.Matches(labels.Set(pod.Labels)) {
return false
}
return true
}
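
A short hedged sketch tying the two helpers together (namespaces and labels invented): with an empty Namespaces list, the term defaults to the affinity pod's own namespace.

package utils

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ExamplePodMatchesTermsNamespaceAndSelector() {
	affinityPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "team-a"}}
	// Empty Namespaces: the term applies within affinityPod's namespace.
	term := &v1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
	}
	namespaces := GetNamespacesFromPodAffinityTerm(affinityPod, term)
	selector, _ := metav1.LabelSelectorAsSelector(term.LabelSelector)

	candidate := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "b", Namespace: "team-a", Labels: map[string]string{"app": "web"},
	}}
	fmt.Println(PodMatchesTermsNamespaceAndSelector(candidate, namespaces, selector)) // true
}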


@@ -1,85 +0,0 @@
package utils
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
)
var supportedQoSComputeResources = sets.NewString(string(v1.ResourceCPU), string(v1.ResourceMemory))
// QOSList is a set of (resource name, QoS class) pairs.
type QOSList map[v1.ResourceName]v1.PodQOSClass
func isSupportedQoSComputeResource(name v1.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
// GetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
requests := v1.ResourceList{}
limits := v1.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
allContainers := []v1.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
allContainers = append(allContainers, pod.Spec.InitContainers...)
for _, container := range allContainers {
// process requests
for name, quantity := range container.Resources.Requests {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := requests[name]; !exists {
requests[name] = delta
} else {
delta.Add(requests[name])
requests[name] = delta
}
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.DeepCopy()
if _, exists := limits[name]; !exists {
limits[name] = delta
} else {
delta.Add(limits[name])
limits[name] = delta
}
}
}
if !qosLimitsFound.HasAll(string(v1.ResourceMemory), string(v1.ResourceCPU)) {
isGuaranteed = false
}
}
if len(requests) == 0 && len(limits) == 0 {
return v1.PodQOSBestEffort
}
// Check if requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
isGuaranteed = false
break
}
}
}
if isGuaranteed &&
len(requests) == len(limits) {
return v1.PodQOSGuaranteed
}
return v1.PodQOSBurstable
}
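
A compact sketch of the three classifications (quantities invented): no requests or limits yields BestEffort, partial requests yield Burstable, and matching CPU and memory requests/limits yield Guaranteed.

package utils

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func ExampleGetPodQOS() {
	base := func(rr v1.ResourceRequirements) *v1.Pod {
		return &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{Resources: rr}}}}
	}

	bestEffort := base(v1.ResourceRequirements{})

	burstable := base(v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
	})

	full := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("100m"),
		v1.ResourceMemory: resource.MustParse("128Mi"),
	}
	guaranteed := base(v1.ResourceRequirements{Requests: full, Limits: full})

	fmt.Println(GetPodQOS(bestEffort)) // BestEffort
	fmt.Println(GetPodQOS(burstable))  // Burstable
	fmt.Println(GetPodQOS(guaranteed)) // Guaranteed
}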


@@ -1,35 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import v1 "k8s.io/api/core/v1"
// This file contains the datastructures, types & functions needed by all the strategies so that we don't have
// to compute them again in each strategy.
// NodePodEvictedCount keeps count of pods evicted on each node. It is used in conjunction with strategies to cap the number of pods evicted per node.
type NodePodEvictedCount map[*v1.Node]int
// InitializeNodePodCount initializes the nodePodCount.
func InitializeNodePodCount(nodeList []*v1.Node) NodePodEvictedCount {
var nodePodCount = make(NodePodEvictedCount)
for _, node := range nodeList {
// Initialize podsEvicted till now with 0.
nodePodCount[node] = 0
}
return nodePodCount
}
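
A minimal usage sketch (node names invented): strategies bump the per-node counter after each successful eviction and compare it against maxPodsToEvict.

package utils

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ExampleInitializeNodePodCount() {
	n1 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}
	n2 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n2"}}

	counts := InitializeNodePodCount([]*v1.Node{n1, n2})
	counts[n1]++ // a strategy records one eviction on n1

	fmt.Println(counts[n1], counts[n2]) // 1 0
}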


@@ -1,249 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"math"
"testing"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/utils"
)
// MakePodSpec returns a PodSpec with a single pause container and fixed resource requests and limits.
func MakePodSpec() v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Never",
Image: "kubernetes/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("1000Mi"),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("800Mi"),
},
},
}},
}
}
// RcByNameContainer returns a ReplicationController with the specified name and container.
func RcByNameContainer(name string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
zeroGracePeriod := int64(0)
// Add "name": name to the labels, overwriting if it exists.
labels["name"] = name
if gracePeriod == nil {
gracePeriod = &zeroGracePeriod
}
return &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicationController",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{
"name": name,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: MakePodSpec(),
},
},
}
}
// startEndToEndForLowNodeUtilization tests the low node utilization strategy.
func startEndToEndForLowNodeUtilization(clientset clientset.Interface) {
var thresholds = make(deschedulerapi.ResourceThresholds)
var targetThresholds = make(deschedulerapi.ResourceThresholds)
thresholds[v1.ResourceMemory] = 20
thresholds[v1.ResourcePods] = 20
thresholds[v1.ResourceCPU] = 85
targetThresholds[v1.ResourceMemory] = 20
targetThresholds[v1.ResourcePods] = 20
targetThresholds[v1.ResourceCPU] = 90
// Run descheduler.
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
klog.Fatalf("%v", err)
}
stopChannel := make(chan struct{})
nodes, err := nodeutil.ReadyNodes(clientset, "", stopChannel)
if err != nil {
klog.Fatalf("%v", err)
}
nodeUtilizationThresholds := deschedulerapi.NodeResourceUtilizationThresholds{Thresholds: thresholds, TargetThresholds: targetThresholds}
nodeUtilizationStrategyParams := deschedulerapi.StrategyParameters{NodeResourceUtilizationThresholds: nodeUtilizationThresholds}
lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{Enabled: true, Params: nodeUtilizationStrategyParams}
ds := &options.DeschedulerServer{Client: clientset}
nodePodCount := utils.InitializeNodePodCount(nodes)
strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, evictionPolicyGroupVersion, nodes, nodePodCount)
time.Sleep(10 * time.Second)
}
func TestE2E(t *testing.T) {
// If we have reached here, it means the cluster has already been set up and the kubeconfig
// file should be in the /tmp directory as admin.conf.
clientSet, err := client.CreateClient("/tmp/admin.conf")
if err != nil {
t.Errorf("Error during client creation with %v", err)
}
nodeList, err := clientSet.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
// Assumption: We have a 3-node cluster by now. Kubeadm brings all the master components
// onto the master node, so the last node should have the least utilization.
rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
_, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc)
if err != nil {
t.Errorf("Error creating deployment %v", err)
}
evictPods(t, clientSet, nodeList, rc)
rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
rc.Spec.Replicas = func(i int32) *int32 { return &i }(15)
rc.Spec.Template.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
_, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc)
if err != nil {
t.Errorf("Error creating deployment %v", err)
}
evictPods(t, clientSet, nodeList, rc)
}
func TestDeschedulingInterval(t *testing.T) {
clientSet, err := client.CreateClient("/tmp/admin.conf")
if err != nil {
t.Errorf("Error during client creation with %v", err)
}
// By default, the DeschedulingInterval param should be set to 0, meaning Descheduler only runs once then exits
s := options.NewDeschedulerServer()
s.Client = clientSet
deschedulerPolicy := &api.DeschedulerPolicy{}
c := make(chan bool)
go func() {
err := descheduler.RunDeschedulerStrategies(s, deschedulerPolicy)
if err != nil {
t.Errorf("Error running descheduler strategies: %+v", err)
}
c <- true
}()
select {
case <-c:
// successfully returned
case <-time.After(3 * time.Minute):
t.Errorf("descheduler.Run timed out even without descheduling-interval set")
}
}
func evictPods(t *testing.T, clientSet clientset.Interface, nodeList *v1.NodeList, rc *v1.ReplicationController) {
var leastLoadedNode v1.Node
podsBefore := math.MaxInt16
for i := range nodeList.Items {
// Skip the Master Node
if _, exist := nodeList.Items[i].Labels["node-role.kubernetes.io/master"]; exist {
continue
}
// List all the pods on the current Node
podsOnANode, err := podutil.ListEvictablePodsOnNode(clientSet, &nodeList.Items[i], true)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
// Update leastLoadedNode if necessary
if tmpLoads := len(podsOnANode); tmpLoads < podsBefore {
leastLoadedNode = nodeList.Items[i]
podsBefore = tmpLoads
}
}
t.Log("Eviction of pods starting")
startEndToEndForLowNodeUtilization(clientSet)
podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(clientSet, &leastLoadedNode, true)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
podsAfter := len(podsOnleastUtilizedNode)
if podsBefore > podsAfter {
t.Fatalf("We should have see more pods on this node as per kubeadm's way of installing %v, %v", podsBefore, podsAfter)
}
//set number of replicas to 0
rc.Spec.Replicas = func(i int32) *int32 { return &i }(0)
_, err = clientSet.CoreV1().ReplicationControllers("default").Update(rc)
if err != nil {
t.Errorf("Error updating replica controller %v", err)
}
allPodsDeleted := false
//wait 30 seconds until all pods are deleted
for i := 0; i < 6; i++ {
scale, _ := clientSet.CoreV1().ReplicationControllers("default").GetScale(rc.Name, metav1.GetOptions{})
if scale.Spec.Replicas == 0 {
allPodsDeleted = true
break
}
time.Sleep(5 * time.Second)
}
if !allPodsDeleted {
t.Errorf("Deleting of rc pods took too long")
}
err = clientSet.CoreV1().ReplicationControllers("default").Delete(rc.Name, &metav1.DeleteOptions{})
if err != nil {
t.Errorf("Error deleting rc %v", err)
}
//wait until rc is deleted
time.Sleep(5 * time.Second)
}


@@ -1,19 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# This just runs the e2e tests.
PRJ_PREFIX="sigs.k8s.io/descheduler"
go test ${PRJ_PREFIX}/test/e2e/ -v


@@ -1,19 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# This just runs the unit tests, skipping the vendor and e2e test directories.
PRJ_PREFIX="sigs.k8s.io/descheduler"
go test $(go list ${PRJ_PREFIX}/... | grep -v ${PRJ_PREFIX}/vendor/| grep -v ${PRJ_PREFIX}/test/)


@@ -1,113 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// BuildTestPod creates a test pod with given parameters.
func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: name,
SelfLink: fmt.Sprintf("/api/v1/namespaces/default/pods/%s", name),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{},
Limits: v1.ResourceList{},
},
},
},
NodeName: nodeName,
},
}
if cpu >= 0 {
pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
}
if memory >= 0 {
pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
}
return pod
}
// GetMirrorPodAnnotation returns the annotation needed for mirror pod.
func GetMirrorPodAnnotation() map[string]string {
return map[string]string{
"kubernetes.io/created-by": "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Pod\"}}",
"kubernetes.io/config.source": "api",
"kubernetes.io/config.mirror": "mirror",
}
}
// GetNormalPodOwnerRefList returns the ownerRef needed for a pod.
func GetNormalPodOwnerRefList() []metav1.OwnerReference {
ownerRefList := make([]metav1.OwnerReference, 0)
ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "Pod", APIVersion: "v1"})
return ownerRefList
}
// GetReplicaSetOwnerRefList returns the ownerRef needed for replicaset pod.
func GetReplicaSetOwnerRefList() []metav1.OwnerReference {
ownerRefList := make([]metav1.OwnerReference, 0)
ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-1"})
return ownerRefList
}
// GetDaemonSetOwnerRefList returns the ownerRef needed for daemonset pod.
func GetDaemonSetOwnerRefList() []metav1.OwnerReference {
ownerRefList := make([]metav1.OwnerReference, 0)
ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "DaemonSet", APIVersion: "v1"})
return ownerRefList
}
// BuildTestNode creates a node with specified capacity.
func BuildTestNode(name string, millicpu int64, mem int64, pods int64) *v1.Node {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,
SelfLink: fmt.Sprintf("/api/v1/nodes/%s", name),
Labels: map[string]string{},
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(millicpu, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(millicpu, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
},
Phase: v1.NodeRunning,
Conditions: []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionTrue},
},
},
}
return node
}
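
A hedged sketch of how these builders are typically combined in the tests shown earlier (values invented):

package test

import "fmt"

func ExampleBuildTestPod() {
	// A node with 2000 millicores of CPU, 3000 bytes of memory, and room for 10 pods.
	node := BuildTestNode("n1", 2000, 3000, 10)
	// A pod requesting 100 millicores of CPU and no memory, placed on that node.
	pod := BuildTestPod("p1", 100, 0, node.Name)

	fmt.Println(pod.Spec.NodeName)                                        // n1
	fmt.Println(pod.Spec.Containers[0].Resources.Requests.Cpu().String()) // 100m
}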

vendor/cloud.google.com/go/LICENSE generated vendored

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,513 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
userAgent = "gcloud-golang/0.1"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
defaultClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}}
subscribeClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
func (c *cachedValue) get(cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = cl.getTrimmed(c.k)
} else {
v, err = cl.Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/googleapis/google-cloud-go/issues/194
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := defaultClient.hc.Do(req.WithContext(ctx))
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// metaClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return subscribeClient.Subscribe(suffix, fn)
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance name string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }
// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
return defaultClient.InstanceAttributeValue(attr)
}
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
return defaultClient.ProjectAttributeValue(attr)
}
// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// A Client provides metadata.
type Client struct {
hc *http.Client
}
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
return &Client{hc: c}
}
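// Example (a minimal sketch; assumes this package's canonical import path,
// cloud.google.com/go/compute/metadata): callers wanting stricter timeouts
// than the default client can supply their own http.Client.
//
//	c := metadata.NewClient(&http.Client{Timeout: 2 * time.Second})
//	zone, err := c.Zone() // e.g. "us-central1-b"
//	if err != nil {
//		// Not on GCE (or the metadata server is unreachable).
//	}
//	_ = zone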
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
u := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", u, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
if res.StatusCode != http.StatusOK {
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
}
return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceName returns the current VM's instance name string.
func (c *Client) InstanceName() (string, error) {
host, err := c.Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := c.getETag(suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
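// Example (a minimal sketch; the suffix and the applyConfig helper are
// illustrative, not part of this package): watching a metadata value.
//
//	err := c.Subscribe("instance/attributes/my-config", func(v string, ok bool) error {
//		if !ok {
//			// The value was deleted; Subscribe returns after this call.
//			return nil
//		}
//		applyConfig(v) // hypothetical caller-side helper
//		return nil     // a non-nil error would end the subscription
//	})
//	_ = err // nil here means the watched value was deleted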
// Error contains an error response from the server.
type Error struct {
// Code is the HTTP response status code.
Code int
// Message is the server response message.
Message string
}
func (e *Error) Error() string {
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
}
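// Example (a minimal sketch; the attribute name is illustrative):
// distinguishing "not defined" from server errors when reading optional
// metadata.
//
//	val, err := c.Get("instance/attributes/optional-key")
//	if _, ok := err.(NotDefinedError); ok {
//		// The key is simply unset; fall back to a default.
//	} else if e, ok := err.(*Error); ok {
//		fmt.Printf("metadata server error %d: %s\n", e.Code, e.Message)
//	}
//	_ = val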


@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.



@@ -1,292 +0,0 @@
# Azure Active Directory authentication for Go
This is a standalone package for authenticating with Azure Active
Directory from other Go libraries and applications, in particular the [Azure SDK
for Go](https://github.com/Azure/azure-sdk-for-go).
Note: Despite the package's name it is not related to other "ADAL" libraries
maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
trackers.
## Install
```bash
go get -u github.com/Azure/go-autorest/autorest/adal
```
## Usage
An Active Directory application is required to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or by using the [Azure CLI](https://github.com/Azure/azure-cli).
### Register an Azure AD Application with secret
1. Register a new application with a `secret` credential
```
az ad app create \
--display-name example-app \
--homepage https://example-app/home \
--identifier-uris https://example-app/app \
--password secret
```
2. Create a service principal using the `Application ID` from the previous step
```
az ad sp create --id "Application ID"
```
* Replace `Application ID` with `appId` from step 1.
### Register an Azure AD Application with certificate
1. Create a private key
```
openssl genrsa -out "example-app.key" 2048
```
2. Create the certificate
```
openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
```
3. Create the PKCS12 version of the certificate, which also contains the private key
```
openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
```
4. Register a new application with the certificate content from `example-app.crt`
```
certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
az ad app create \
--display-name example-app \
--homepage https://example-app/home \
--identifier-uris https://example-app/app \
--key-usage Verify --end-date 2018-01-01 \
--key-value "${certificateContents}"
```
5. Create a service principal using the `Application ID` from the previous step
```
az ad sp create --id "APPLICATION_ID"
```
* Replace `APPLICATION_ID` with `appId` from step 4.
### Grant the necessary permissions
Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
which can be assigned to a service principal of an Azure AD application depending on your needs.
```
az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
```
* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step.
* Replace the `ROLE_NAME` with a role name of your choice.
It is also possible to define custom role definitions.
```
az role definition create --role-definition role-definition.json
```
* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
### Acquire Access Token
The common configuration used by all flows:
```Go
const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
tenantID := "TENANT_ID"
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
applicationID := "APPLICATION_ID"
callback := func(token adal.Token) error {
// This is called after the token is acquired
return nil
}
// The resource for which the token is acquired
resource := "https://management.core.windows.net/"
```
* Replace the `TENANT_ID` with your tenant ID.
* Replace the `APPLICATION_ID` with the value from previous section.
#### Client Credentials
```Go
applicationSecret := "APPLICATION_SECRET"
spt, err := adal.NewServicePrincipalToken(
*oauthConfig,
applicationID,
applicationSecret,
resource,
callbacks...)
if err != nil {
return nil, err
}
// Acquire a new access token
err = spt.Refresh()
if err == nil {
token := spt.Token
}
```
* Replace the `APPLICATION_SECRET` with the `password` value from previous section.
#### Client Certificate
```Go
certificatePath := "./example-app.pfx"
certData, err := ioutil.ReadFile(certificatePath)
if err != nil {
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
}
// Get the certificate and private key from pfx file
certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
if err != nil {
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
}
spt, err := adal.NewServicePrincipalTokenFromCertificate(
*oauthConfig,
applicationID,
certificate,
rsaPrivateKey,
resource,
callbacks...)
// Acquire a new access token
err = spt.Refresh()
if err == nil {
token := spt.Token
}
```
* Update the certificate path to point to the example-app.pfx file created in the previous section.
#### Device Code
```Go
oauthClient := &http.Client{}
// Acquire the device code
deviceCode, err := adal.InitiateDeviceAuth(
oauthClient,
*oauthConfig,
applicationID,
resource)
if err != nil {
return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
}
// Display the authentication message
fmt.Println(*deviceCode.Message)
// Wait here until the user is authenticated
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
if err != nil {
return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
}
spt, err := adal.NewServicePrincipalTokenFromManualToken(
*oauthConfig,
applicationID,
resource,
*token,
callbacks...)
if err == nil {
token := spt.Token
}
```
#### Username and password authentication
```Go
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
*oauthConfig,
applicationID,
username,
password,
resource,
callbacks...)
if err == nil {
token := spt.Token
}
```
#### Authorization code authentication
```Go
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
*oauthConfig,
applicationID,
clientSecret,
authorizationCode,
redirectURI,
resource,
callbacks...)
err = spt.Refresh()
if err == nil {
token := spt.Token
}
```
### Command Line Tool
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
```
adal -h
Usage of ./adal:
-applicationId string
application id
-certificatePath string
path to pk12/PFX application certificate
-mode string
authentication mode (device, secret, cert, refresh) (default "device")
-resource string
resource for which the token is requested
-secret string
application secret
-tenantId string
tenant id
-tokenCachePath string
location of oauth token cache (default "/home/cgc/.adal/accessToken.json")
```
Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
```
adal -mode device \
-applicationId "APPLICATION_ID" \
-tenantId "TENANT_ID" \
-resource https://management.core.windows.net/
```


@@ -1,151 +0,0 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/url"
)
const (
activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
)
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
AuthorityEndpoint url.URL `json:"authorityEndpoint"`
AuthorizeEndpoint url.URL `json:"authorizeEndpoint"`
TokenEndpoint url.URL `json:"tokenEndpoint"`
DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
}
// IsZero returns true if the OAuthConfig object is zero-initialized.
func (oac OAuthConfig) IsZero() bool {
return oac == OAuthConfig{}
}
func validateStringParam(param, name string) error {
if len(param) == 0 {
return fmt.Errorf("parameter '" + name + "' cannot be empty")
}
return nil
}
// NewOAuthConfig returns an OAuthConfig with tenant-specific URLs
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
apiVer := "1.0"
return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
}
// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant-specific URLs.
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
return nil, err
}
api := ""
// it's legal for tenantID to be empty so don't validate it
if apiVersion != nil {
if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
return nil, err
}
api = fmt.Sprintf("?api-version=%s", *apiVersion)
}
u, err := url.Parse(activeDirectoryEndpoint)
if err != nil {
return nil, err
}
authorityURL, err := u.Parse(tenantID)
if err != nil {
return nil, err
}
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
if err != nil {
return nil, err
}
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
if err != nil {
return nil, err
}
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
if err != nil {
return nil, err
}
return &OAuthConfig{
AuthorityEndpoint: *authorityURL,
AuthorizeEndpoint: *authorizeURL,
TokenEndpoint: *tokenURL,
DeviceCodeEndpoint: *deviceCodeURL,
}, nil
}
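// Example (a minimal sketch; the endpoint and tenant values are
// illustrative): building tenant-specific endpoints with an explicit
// api-version.
//
//	apiVer := "1.0"
//	cfg, err := adal.NewOAuthConfigWithAPIVersion(
//		"https://login.microsoftonline.com/", "TENANT_ID", &apiVer)
//	if err == nil {
//		_ = cfg.TokenEndpoint // .../TENANT_ID/oauth2/token?api-version=1.0
//	}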
// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
type MultiTenantOAuthConfig interface {
PrimaryTenant() *OAuthConfig
AuxiliaryTenants() []*OAuthConfig
}
// OAuthOptions contains optional OAuthConfig creation arguments.
type OAuthOptions struct {
APIVersion string
}
// apiVersion returns the configured API version, defaulting to "1.0".
// NewOAuthConfigWithAPIVersion adds the "?api-version=" query prefix.
func (c OAuthOptions) apiVersion() string {
if c.APIVersion != "" {
return c.APIVersion
}
return "1.0"
}
// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration.
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
return nil, errors.New("must specify one to three auxiliary tenants")
}
mtCfg := multiTenantOAuthConfig{
cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
}
apiVer := options.apiVersion()
pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
if err != nil {
return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
}
mtCfg.cfgs[0] = pri
for i := range auxiliaryTenantIDs {
aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
if err != nil {
return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
}
mtCfg.cfgs[i+1] = aux
}
return mtCfg, nil
}
type multiTenantOAuthConfig struct {
// first config in the slice is the primary tenant
cfgs []*OAuthConfig
}
func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
return m.cfgs[0]
}
func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
return m.cfgs[1:]
}
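// Example (a minimal sketch; tenant IDs are illustrative): one primary
// tenant plus up to three auxiliary tenants.
//
//	mt, err := adal.NewMultiTenantOAuthConfig(
//		"https://login.microsoftonline.com/",
//		"PRIMARY_TENANT_ID",
//		[]string{"AUX_TENANT_ID"},
//		adal.OAuthOptions{},
//	)
//	if err == nil {
//		_ = mt.PrimaryTenant()
//		_ = mt.AuxiliaryTenants()
//	}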


@@ -1,242 +0,0 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
This file is largely based on rjw57/oauth2device's code, with the following differences:
* scope -> resource, and only allow a single one
* receive "Message" in the DeviceCode struct and show it to users as the prompt
* azure-xplat-cli has the following behavior that this emulates:
- does not send client_secret during the token exchange
- sends resource again in the token exchange request
*/
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
)
const (
logPrefix = "autorest/adal/devicetoken:"
)
var (
// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
errCodeSendingFails = "Error occurred while sending request for Device Authorization Code"
errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint"
errTokenSendingFails = "Error occurred while sending request with device code for a token"
errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
errStatusNotOK = "Error HTTP status != 200"
)
// DeviceCode is the object returned by the device auth endpoint.
// It contains information to instruct the user to complete the auth flow
type DeviceCode struct {
DeviceCode *string `json:"device_code,omitempty"`
UserCode *string `json:"user_code,omitempty"`
VerificationURL *string `json:"verification_url,omitempty"`
ExpiresIn *int64 `json:"expires_in,string,omitempty"`
Interval *int64 `json:"interval,string,omitempty"`
Message *string `json:"message"` // Azure specific
// The following fields are stored when auth is initiated and reused when
// exchanging the device code for a token.
Resource string
OAuthConfig OAuthConfig
ClientID string
}
// TokenError is the object returned by the token exchange endpoint
// when something is amiss
type TokenError struct {
Error *string `json:"error,omitempty"`
ErrorCodes []int `json:"error_codes,omitempty"`
ErrorDescription *string `json:"error_description,omitempty"`
Timestamp *string `json:"timestamp,omitempty"`
TraceID *string `json:"trace_id,omitempty"`
}
// deviceToken is the object returned by the token exchange endpoint.
// It can look like either a Token or a TokenError, so both are embedded
// here; the presence of "Error" indicates an error state.
type deviceToken struct {
Token
TokenError
}
// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
v := url.Values{
"client_id": []string{clientID},
"resource": []string{resource},
}
s := v.Encode()
body := ioutil.NopCloser(strings.NewReader(s))
req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
}
req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
resp, err := sender.Do(req)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
}
defer resp.Body.Close()
rb, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
}
if len(strings.Trim(string(rb), " ")) == 0 {
return nil, ErrDeviceCodeEmpty
}
var code DeviceCode
err = json.Unmarshal(rb, &code)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
}
code.ClientID = clientID
code.Resource = resource
code.OAuthConfig = oauthConfig
return &code, nil
}
// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
// to see if the device flow has: been completed, timed out, or otherwise failed
func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
v := url.Values{
"client_id": []string{code.ClientID},
"code": []string{*code.DeviceCode},
"grant_type": []string{OAuthGrantTypeDeviceCode},
"resource": []string{code.Resource},
}
s := v.Encode()
body := ioutil.NopCloser(strings.NewReader(s))
req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
}
req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
resp, err := sender.Do(req)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
}
defer resp.Body.Close()
rb, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
}
if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
}
if len(strings.Trim(string(rb), " ")) == 0 {
return nil, ErrOAuthTokenEmpty
}
var token deviceToken
err = json.Unmarshal(rb, &token)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
}
if token.Error == nil {
return &token.Token, nil
}
switch *token.Error {
case "authorization_pending":
return nil, ErrDeviceAuthorizationPending
case "slow_down":
return nil, ErrDeviceSlowDown
case "access_denied":
return nil, ErrDeviceAccessDenied
case "code_expired":
return nil, ErrDeviceCodeExpired
default:
return nil, ErrDeviceGeneric
}
}
// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
// This saves the caller from looping and checking for 'ErrDeviceAuthorizationPending' itself.
func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
intervalDuration := time.Duration(*code.Interval) * time.Second
waitDuration := intervalDuration
for {
token, err := CheckForUserCompletion(sender, code)
if err == nil {
return token, nil
}
switch err {
case ErrDeviceSlowDown:
waitDuration += waitDuration
case ErrDeviceAuthorizationPending:
// noop
default: // everything else is "fatal" to us
return nil, err
}
if waitDuration > (intervalDuration * 3) {
return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
}
time.Sleep(waitDuration)
}
}
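// Example (a minimal sketch; oauthConfig, clientID and resource are
// illustrative, and the snippet assumes a function returning
// (*adal.Token, error)): the typical pairing of InitiateDeviceAuth and
// WaitForUserCompletion. *http.Client satisfies Sender via its Do method.
//
//	client := &http.Client{}
//	code, err := adal.InitiateDeviceAuth(client, oauthConfig, clientID, resource)
//	if err != nil {
//		return nil, err
//	}
//	fmt.Println(*code.Message) // tells the user where to enter the code
//	return adal.WaitForUserCompletion(client, code)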


@@ -1,11 +0,0 @@
module github.com/Azure/go-autorest/autorest/adal
go 1.12
require (
github.com/Azure/go-autorest/autorest/date v0.1.0
github.com/Azure/go-autorest/autorest/mocks v0.1.0
github.com/Azure/go-autorest/tracing v0.5.0
github.com/dgrijalva/jwt-go v3.2.0+incompatible
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
)


@@ -1,12 +0,0 @@
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

Some files were not shown because too many files have changed in this diff.