mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Compare commits


791 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
7ccb7ec675 Merge pull request #964 from damemi/0.25.1-helm-chart
Update Helm chart to v0.25.1
2022-09-27 08:01:51 -07:00
Mike Dame
ca9cd92557 Update Helm chart to v0.25.1 2022-09-27 14:36:55 +00:00
Kubernetes Prow Robot
425197466c Merge pull request #963 from damemi/0.25.1-updates
[release-1.25] Doc updates for v0.25.1
2022-09-27 07:15:51 -07:00
Mike Dame
ea4304f429 Prep doc updates for v0.25.1 2022-09-27 13:48:14 +00:00
Kubernetes Prow Robot
cda52e52fd Merge pull request #962 from knelasevero/release-1.25-issue-960
backport 7349b39 (issue 960) into release-1.25
2022-09-27 05:15:51 -07:00
Vlastimil Holer
3998a0246b includeSoftConstraints not being respected for TopologySpreadConstraint
Issue #960

Signed-off-by: Vlastimil Holer <vh@fortrabbit.com>
2022-09-27 13:50:10 +02:00
Kubernetes Prow Robot
59834cf8a7 Merge pull request #936 from pravarag/update-helm-chart-v1.25
Update helm chart version to v1.25.0
2022-09-14 09:03:00 -07:00
Kubernetes Prow Robot
82ed18fd2b Merge pull request #947 from eminaktas/metric-label-fix
feat: change DeschedulerVersion and GitVersion labels
2022-09-12 12:23:05 -07:00
eminaktas
2c17af79f4 feat: change DeschedulerVersion and GitVersion labels
This commit changes the build_info metric labels:
- AppVersion label will show the major+minor version,
  for example 0.24.1
- GitVersion label will show the full version, including
  minor version numbers and the commit hash

Signed-off-by: eminaktas <eminaktas34@gmail.com>
2022-09-12 21:36:40 +03:00
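For illustration of the label change described in the commit above, a build-info style gauge can be exposed with the Prometheus client library. The metric name, label names, and example values below are assumptions for this sketch, not the descheduler's actual metric registration code:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// buildInfo is an illustrative gauge; the real descheduler metric is
// registered through its own metrics package, so the names here are assumptions.
var buildInfo = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "descheduler_build_info",
		Help: "Build information; the value is always 1.",
	},
	[]string{"AppVersion", "GitVersion"},
)

func main() {
	prometheus.MustRegister(buildInfo)
	// AppVersion carries the major+minor version, GitVersion the full
	// version including the commit hash (both values are hypothetical).
	buildInfo.With(prometheus.Labels{
		"AppVersion": "0.24",
		"GitVersion": "v0.24.1-abc1234",
	}).Set(1)

	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}
```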
Kubernetes Prow Robot
72bf50fde6 Merge pull request #929 from knelasevero/ev-filter-plugin
Add new DefaultEvictor plugin with args
2022-09-12 09:21:24 -07:00
Lucas Severo Alves
f47c2c4407 add new PreEvictionFilter plugin with args 2022-09-12 16:56:21 +02:00
Kubernetes Prow Robot
16619fcf44 Merge pull request #931 from a7i/amir/v1beta1
remove TODO comments for cronjob v1beta1 support
2022-09-12 05:07:25 -07:00
Kubernetes Prow Robot
0317be1b76 Merge pull request #935 from pravarag/update-docs-1.25-release
Update docs & manifests for v0.25.0
2022-09-08 06:49:45 -07:00
Kubernetes Prow Robot
d8bac08592 Merge pull request #945 from gallowaystorm/patch-1
feat: add RemovePodsHavingTooManyRestarts to values.yaml
2022-09-07 18:22:06 -07:00
Storm Galloway
d14df1fedf feat: add RemovePodsHavingTooManyRestarts to yaml
This does the following:
1. Enables RemovePodsHavingTooManyRestarts when using Helm by default (it is not currently)
2. Adds RemovePodsHavingTooManyRestarts to the values.yaml for clearer configs
2022-09-07 14:42:35 -05:00
Kubernetes Prow Robot
8a769603a6 Merge pull request #928 from a7i/podlifetime-states-version-clarification
clarify which version PodLifeTime introduced states parameter and deprecated podStatusPhases
2022-09-07 09:40:37 -07:00
Kubernetes Prow Robot
137a6b999f Merge pull request #943 from gallowaystorm/patch-1
Add RemovePodsViolatingTopologySpreadConstraint to values.yaml
2022-09-06 19:58:37 -07:00
Storm Galloway
334b4bb12c Add RemovePodsViolatingTopologySpreadConstraint to values.yaml
- add RemovePodsViolatingTopologySpreadConstraint yaml to values.yaml to make chart config clearer
2022-09-06 12:47:06 -05:00
Kubernetes Prow Robot
5a2a180f17 Merge pull request #938 from a7i/remove-kubectl-dep
remove dependency on kubectl
2022-09-06 09:33:52 -07:00
Amir Alavi
1265b4c325 remove dependency on kubectl
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2022-09-06 10:47:22 -04:00
Kubernetes Prow Robot
ea8e648cfb Merge pull request #933 from a7i/1.25-rc.0
Bump to k8s 1.25
2022-09-06 07:30:56 -07:00
Amir Alavi
e8fae9a3b7 remove pod security policy; additional policy/v1beta1 cleanup; use informers for descheduler unit tests
update go to 1.19 and helm kubernetes cluster to 1.25
bump -rc.0 to 1.25 GA
bump k8s utils library
bump golang-ci
use go 1.19 for helm github action
upgrade kubectl from 0.20 to 0.25

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2022-09-04 10:30:40 -04:00
JaneLiuL
c9b0fbe467 Bump to k8s 1.25-rc.0 2022-09-03 09:57:56 -04:00
Pravar Agrawal
66694bb767 update helm chart version to v1.25.0 2022-09-03 00:00:16 +05:30
Pravar Agrawal
e68ceb2273 Update docs & manifests for v0.25.0 2022-09-02 23:51:21 +05:30
Amir Alavi
dcb81f65a9 remove TODO comments for cronjob v1beta1 support 2022-08-30 15:53:21 -04:00
Amir Alavi
face080485 clarify which version PodLifeTime introduced states parameter and deprecated podStatusPhases
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2022-08-27 14:59:14 -04:00
Kubernetes Prow Robot
1eade5bf91 Merge pull request #922 from jklaw90/remove-plugin
removing dupe plugin interface check
2022-08-25 12:48:08 -07:00
Julian Lawrence
bfcd310a16 removing dupe plugin interface check 2022-08-24 16:12:24 -07:00
Kubernetes Prow Robot
70df89601a Merge pull request #910 from JaneLiuL/master
bring lownodeutilization and highnodeutilization to plugin
2022-08-17 05:41:13 -07:00
JaneLiuL
680e650706 bring lownodeutilization and highnodeutilization to plugin 2022-08-17 17:30:46 +08:00
Kubernetes Prow Robot
b743b2d5f7 Merge pull request #903 from knelasevero/migrate-podantiaffinity
Migrate RemovePodsViolatingInterPodAntiAffinity into a plugin
2022-08-16 07:59:08 -07:00
Kubernetes Prow Robot
cfc5d0c24a Merge pull request #916 from ingvagabund/skip-fitsRequest-for-current-node
NodeFit: do not check whether node fitsRequest when a pod is already assigned to the node
2022-08-16 07:31:08 -07:00
Kubernetes Prow Robot
ddd145c69a Merge pull request #898 from JaneLiuL/security-gh
add security scan into gh-actions
2022-08-16 05:47:08 -07:00
Jan Chaloupka
d99bdfffc8 NodeFit: do not check whether node fitsRequest when a pod is already assigned to the node 2022-08-16 13:38:11 +02:00
Lucas Severo Alves
a2dd86ac3b Migrate RemovePodsViolatingInterPodAntiAffinity into a plugin 2022-08-16 12:29:25 +02:00
JaneLiuL
50676622de add security scan into gh-actions 2022-08-16 09:36:46 +08:00
Kubernetes Prow Robot
fa3ddc6fee Merge pull request #908 from jklaw90/migrate-RemovePodsViolatingTopologySpreadConstraint
RemovePodsViolatingTopologySpreadConstraint Plugin
2022-08-15 14:25:55 -07:00
Julian Lawrence
674bf4655d migrate plugin - pods violating topologyspread
updated to remove older params
2022-08-15 08:23:04 -07:00
Kubernetes Prow Robot
6d4abe88ca Merge pull request #913 from a7i/migrate-PodLifeTime-to-plugin
Migrate PodLifeTime to plugin
2022-08-15 07:14:14 -07:00
Amir Alavi
d4ff3aef61 Migrate PodLifeTime to plugin 2022-08-15 08:54:42 -04:00
Kubernetes Prow Robot
83c4f5d526 Merge pull request #912 from ingvagabund/container-engine
CONTAINER_ENGINE to override the default docker engine
2022-08-11 18:46:43 -07:00
Jan Chaloupka
d1a9190c50 CONTAINER_ENGINE to override the default docker engine 2022-08-11 16:09:46 +02:00
Kubernetes Prow Robot
a1d4770634 Merge pull request #911 from knelasevero/local-ct-install
introduce ct for local helm install test
2022-08-11 06:10:58 -07:00
Lucas Severo Alves
ba85e794b2 introduce ct for local helm install test 2022-08-10 18:01:42 +02:00
Kubernetes Prow Robot
0a50d5a7da Merge pull request #892 from JaneLiuL/master
bring removeduplicates to plugin
2022-08-10 02:40:30 -07:00
Kubernetes Prow Robot
2de4e23425 Merge pull request #906 from a7i/node-affinity-use-existing-validation
NodeAffinity plugin to use the existing validation methods
2022-08-10 02:02:31 -07:00
JaneLiuL
3474725176 bring removeduplicates to plugin 2022-08-10 15:02:28 +08:00
Amir Alavi
27fa7a70a1 NodeAffinity plugin to use the existing validation methods 2022-08-09 13:38:33 -04:00
Kubernetes Prow Robot
ccfaeb2275 Merge pull request #902 from BinacsLee/binacs/migrate-removepodshavingtoomanyrestarts-to-plugin
Migrate RemovePodsHavingTooManyRestarts to plugin
2022-08-09 09:10:36 -07:00
BinacsLee
d798e7d204 Migrate RemovePodsHavingTooManyRestarts to plugin 2022-08-09 22:05:36 +08:00
Kubernetes Prow Robot
788e9f86bd Merge pull request #860 from knelasevero/migrate-node-afinity-to-plugin
Migrate RemovePodsViolatingNodeAffinity to plugin
2022-08-09 06:14:51 -07:00
Lucas Severo Alves
0c3bf7f957 Migrate RemovePodsViolatingNodeAffinity into a plugin 2022-08-09 14:05:51 +02:00
Kubernetes Prow Robot
349453264e Merge pull request #904 from knelasevero/add-helm-test-step-1
add helm ct install.
2022-08-09 03:54:51 -07:00
Lucas Severo Alves
e9c23fe42f add helm ct install. First step, see https://github.com/kubernetes-sigs/descheduler/pull/895#issuecomment-1203608848 2022-08-08 21:11:20 +02:00
Kubernetes Prow Robot
27ed7d15b9 Merge pull request #899 from a7i/separate-args-validations
separate args validation for better reuse
2022-08-08 08:46:19 -07:00
Amir Alavi
55d4ed479c separate args validation for better reuse 2022-08-05 10:46:04 -04:00
Kubernetes Prow Robot
d109ea64d0 Merge pull request #861 from a7i/migrate-RemoveFailedPods-to-plugin
Migrate RemoveFailedPods to plugin
2022-08-04 07:49:46 -07:00
Amir Alavi
330def2e56 Migrate RemoveFailedPods to plugin 2022-08-02 23:30:49 -04:00
Kubernetes Prow Robot
9880ed5372 Merge pull request #896 from ingvagabund/update-owners
Remove emeritus_approvers from reviewers
2022-08-02 07:31:45 -07:00
Jan Chaloupka
d4ecff5ba4 Remove emeritus_approvers from reviewers 2022-08-02 12:46:23 +02:00
Kubernetes Prow Robot
46e712163a Merge pull request #888 from knelasevero/fix-memory-leak-shutdown-broadcaster
fix: events memory leak. Use new events implementation and take recorder out of EvictPod
2022-08-01 05:14:28 -07:00
Kubernetes Prow Robot
0d1d485850 Merge pull request #894 from ingvagabund/remove-migrated-node-taint-strategy
Remove RemovePodsViolatingNodeTaints strategy already migrated into a plugin
2022-07-28 07:47:11 -07:00
Jan Chaloupka
1294106a22 Remove RemovePodsViolatingNodeTaints strategy already migrated into a plugin 2022-07-28 16:34:42 +02:00
Lucas Severo Alves
0aa233415e use new events implementation and take recorder out of EvictPod 2022-07-28 15:18:21 +02:00
Kubernetes Prow Robot
0d3ff8a84f Merge pull request #857 from ingvagabund/migrate-RemovePodsViolatingNodeTaints-to-plugin
Migrate RemovePodsViolatingNodeTaints to a plugin
2022-07-26 05:59:10 -07:00
Jan Chaloupka
704f6d4496 Migrate RemovePodsViolatingNodeTaints into a plugin 2022-07-21 20:52:24 +02:00
Kubernetes Prow Robot
c699dd1ccc Merge pull request #885 from damemi/evict-options
Add EvictOptions struct to EvictPod()
2022-07-20 10:08:35 -07:00
Mike Dame
d5e66ab62e Add EvictOptions struct to EvictPod() 2022-07-20 16:52:44 +00:00
Kubernetes Prow Robot
3a486f1a79 Merge pull request #882 from iijimakazuyuki/master
Add default lease resource name in Helm chart's ClusterRole
2022-07-11 07:15:49 -07:00
Kubernetes Prow Robot
6e69a10396 Merge pull request #846 from ingvagabund/evictor-interface
Prepare pod evictor for the descheduling framework plugin
2022-07-09 11:27:46 -07:00
Kubernetes Prow Robot
d78994fe6d Merge pull request #883 from a7i/approver-a7i
code approvers: add a7i
2022-07-08 08:23:46 -07:00
Amir Alavi
9ef87b9937 code approvers: add a7i 2022-07-08 09:09:42 -04:00
Kazuyuki Iijima
8b849106ed Add default lease resource name in ClusterRole
Signed-off-by: Kazuyuki Iijima <iijimakazuyuki@gmail.com>
2022-07-08 21:45:17 +09:00
Kubernetes Prow Robot
2ea0a2e1c0 Merge pull request #876 from iijimakazuyuki/master
Use lease resource name from Helm value
2022-07-08 04:43:47 -07:00
Kazuyuki Iijima
329c357834 Use lease resource name from Helm value
Signed-off-by: Kazuyuki Iijima <iijimakazuyuki@gmail.com>
2022-07-08 00:25:39 +09:00
Kubernetes Prow Robot
8072a8c82e Merge pull request #871 from knelasevero/fix-chart-path
fix: chart path can't be relative
2022-07-07 07:11:34 -07:00
Kubernetes Prow Robot
7a7393f5ff Merge pull request #872 from JaneLiuL/master
fix log-file and log-dir issue
2022-07-07 06:57:35 -07:00
Lucas Severo Alves
df65157a3b disable vcs maintainer check 2022-07-06 20:09:07 +02:00
JaneLiuL
754f8c9def fix log-file and log-dir issue 2022-07-06 15:50:43 +08:00
Lucas Severo Alves
d75e9e8c4e fix comment space lint issue 2022-07-04 18:26:28 +02:00
Lucas Severo Alves
2cd79c6816 chart path can't be relative 2022-07-04 18:19:09 +02:00
Kubernetes Prow Robot
aff9a0ba06 Merge pull request #836 from a7i/balancedomains-belowavg
TopologySpreadConstraint: only evaluate nodes below ideal avg when balancing domains
2022-07-03 18:57:22 -07:00
Kubernetes Prow Robot
e1a10c36de Merge pull request #854 from knelasevero/improve-helm-setup
Improving helm setup
2022-07-01 11:03:23 -07:00
Kubernetes Prow Robot
d8897635b0 Merge pull request #834 from a7i/podlifetime-container-state
PodLifeTime: support container states PodInitializing and ContainerCreating
2022-06-28 07:59:58 -07:00
Amir Alavi
abf5752260 PodLifeTime: add States field and deprecate PodStatusPhases 2022-06-25 15:13:18 -04:00
Amir Alavi
934fffb669 RemovePodsViolatingTopologySpreadConstraint: test case to cover tainted nodes and eviction loop 2022-06-20 20:53:30 -04:00
Amir Alavi
7a5e67d462 topologyspreadconstraint_test: ensure specific pods were evicted 2022-06-20 19:21:58 -04:00
Amir Alavi
469bde0a01 TopologySpreadConstraint: only evaluate nodes below ideal avg when balancing domains 2022-06-20 18:42:34 -04:00
Jan Chaloupka
c838614b6c EvictPod: stop returning an error
When an error is returned, a strategy either stops completely or starts
processing another node. Given that the error can be transient, or that
only one of the limits was exceeded, it is fair to just skip the
pod whose eviction failed and proceed to the next one instead.

In order to optimize the processing and stop earlier, it is more
practical to implement a check which reports when a limit has been
exceeded.
2022-06-17 10:12:57 +02:00
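A toy sketch of the control flow described in the commit above: eviction failures are skipped instead of aborting the strategy, and a separate limit check allows stopping early. The evictor type and method names here are hypothetical, not the descheduler's API:

```go
package main

import "fmt"

// evictor is a toy stand-in for the pattern described above; names are
// hypothetical, not descheduler code.
type evictor struct {
	evicted, maxPerNode int
}

// nodeLimitExceeded is the separate check that lets a strategy stop early.
func (e *evictor) nodeLimitExceeded() bool { return e.evicted >= e.maxPerNode }

// evictPod reports success or failure; the caller treats failure as "skip".
func (e *evictor) evictPod(pod string) bool {
	// Pretend that pod-x fails with a transient error.
	if pod == "pod-x" {
		return false
	}
	e.evicted++
	return true
}

func main() {
	e := &evictor{maxPerNode: 2}
	for _, pod := range []string{"pod-a", "pod-x", "pod-b", "pod-c"} {
		if e.nodeLimitExceeded() {
			break // stop early once the per-node limit is reached
		}
		if !e.evictPod(pod) {
			continue // failed eviction is not fatal: skip this pod
		}
		fmt.Println("evicted", pod)
	}
}
```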
Jan Chaloupka
cc49f9fcc2 Drop node parameter from EvictPod
The method uses the node object only to get the node name.
The node name can be retrieved from the pod object instead.

Some strategies might try to evict a pod in Pending state which
does not have the .spec.nodeName field set; in that case the
node limit check is skipped.
2022-06-17 10:10:25 +02:00
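As the commit message notes, the node name is available from the pod itself; a minimal sketch of that lookup, including the Pending-pod case (an illustrative helper, not descheduler code):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// nodeNameForPod returns the name of the node the pod is assigned to, or ""
// for a Pending pod that has not been scheduled yet, mirroring the
// observation in the commit message above.
func nodeNameForPod(pod *v1.Pod) string {
	return pod.Spec.NodeName // empty for unscheduled (Pending) pods
}

func main() {
	pod := &v1.Pod{}
	pod.Spec.NodeName = "worker-1"
	fmt.Println(nodeNameForPod(pod)) // worker-1
	fmt.Println(nodeNameForPod(&v1.Pod{}) == "") // true: pending pod, skip node-limit check
}
```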
Amir Alavi
4e710cdf3b PodLifeTime: support container states PodInitializing and ContainerCreating 2022-06-16 21:17:49 -04:00
Jan Chaloupka
d5ee855221 Pass the strategy name into evictor through context 2022-06-16 16:32:13 +02:00
Kubernetes Prow Robot
b2418ef481 Merge pull request #847 from ingvagabund/split-pod-evictor-and-evictor-filter
Split pod evictor and evictor filter
2022-06-16 07:22:49 -07:00
Kubernetes Prow Robot
1f1aad335a Merge pull request #856 from a7i/pod-sort-age-random
remove random creation timestamp from pod sort by age test
2022-06-16 01:18:49 -07:00
Kubernetes Prow Robot
627d219dab Merge pull request #852 from knelasevero/existing-contraints
Check existing constraints before assigning
2022-06-15 06:07:17 -07:00
Lucas Severo Alves
30c972e49e change namespaceTopologySpreadConstraints from map to slice 2022-06-15 14:54:56 +02:00
Amir Alavi
a7cfb25e9b remove random creation timestamp from pod sort by age test 2022-06-14 23:28:58 -04:00
Lucas Severo Alves
45e1cdbd01 WIP: improving helm setup 2022-06-14 17:08:00 +02:00
Lucas Severo Alves
dad3db3187 Check existing constraints before assigning 2022-06-14 10:43:59 +02:00
Jan Chaloupka
d2130747d8 Split pod evictor and evictor filter 2022-06-13 18:48:05 +02:00
Kubernetes Prow Robot
84c8d1ca03 Merge pull request #843 from damemi/docs-readme-1.24.1
Update docs, manifests for 0.24.1 on master
2022-06-08 06:58:24 -07:00
Mike Dame
5dfd54e500 Docs and readme updates 2022-06-08 13:35:22 +00:00
Kubernetes Prow Robot
7550fba2fa Merge pull request #840 from a7i/nodefit-docs
nodeFit: fix docs spacing for placement of the field
2022-06-07 12:28:27 -07:00
Amir Alavi
038b6e1ca7 nodeFit: fix docs spacing for placement of the field 2022-06-07 11:59:30 -04:00
Kubernetes Prow Robot
98a946dea7 Merge pull request #833 from a7i/podlifetime-sort-age
PodLifeTime: sort pods by creation timestamp
2022-06-07 01:23:49 -07:00
Amir Alavi
871a10344e e2e: use kubernetes utils pointer library 2022-06-06 22:05:32 -04:00
Amir Alavi
311d75223f PodLifeTime: sort pods by creation timestamp 2022-06-06 21:49:24 -04:00
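The sorting described in the commit above can be sketched with the standard library and the Kubernetes API types; this is an illustrative helper assuming oldest-first ordering, not the strategy's actual code:

```go
package main

import (
	"fmt"
	"sort"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// sortPodsByAge orders pods oldest first by .metadata.creationTimestamp.
func sortPodsByAge(pods []*v1.Pod) {
	sort.Slice(pods, func(i, j int) bool {
		return pods[i].CreationTimestamp.Before(&pods[j].CreationTimestamp)
	})
}

func main() {
	newer := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "newer", CreationTimestamp: metav1.Now()}}
	older := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "older"}} // zero timestamp sorts first
	pods := []*v1.Pod{newer, older}
	sortPodsByAge(pods)
	fmt.Println(pods[0].Name, pods[1].Name) // older newer
}
```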
Kubernetes Prow Robot
33807ed8e4 Merge pull request #830 from a7i/revert-805-cleanup/lownodeutilization
Revert "cleanup lownodeutilization code"
2022-06-01 08:51:03 -07:00
Amir Alavi
3cc0a68f13 lownodeutilization: clarify comments and variable naming for underutilized vs. overutilized 2022-06-01 11:39:38 -04:00
Amir Alavi
8e1d35cb3c Revert "cleanup lownodeutilization code" 2022-06-01 11:28:09 -04:00
Kubernetes Prow Robot
59c4904ddc Merge pull request #805 from xiaoanyunfei/cleanup/lownodeutilization
cleanup lownodeutilization code
2022-06-01 07:45:02 -07:00
Kubernetes Prow Robot
c5604c760d Merge pull request #825 from damemi/cloudbuild-timeout
Increase cloudbuild timeout to 25 minutes
2022-06-01 06:47:02 -07:00
Mike Dame
f769296243 Increase cloudbuild timeout to 25 minutes 2022-05-31 18:51:53 +00:00
Kubernetes Prow Robot
8972bd9bf0 Merge pull request #823 from damemi/fix-version-cmd
Fix version command to parse helm chart tags
2022-05-31 11:14:53 -07:00
Kubernetes Prow Robot
873381197b Merge pull request #821 from damemi/test-version-updates
Update helm tests util versions and release guide
2022-05-31 10:44:52 -07:00
Mike Dame
af45591c25 Fix version command to parse helm chart tags 2022-05-31 17:39:08 +00:00
Mike Dame
17e986418f Update helm tests util versions and release guide 2022-05-31 17:05:14 +00:00
Kubernetes Prow Robot
5a9e65833f Merge pull request #818 from damemi/release-guide-updates
Update release guide docs
2022-05-26 08:21:24 -07:00
Mike Dame
725ca47bda Update release guide docs 2022-05-25 15:36:58 +00:00
Kubernetes Prow Robot
f39058af1c Merge pull request #813 from stephan2012/bugfix/leader-election-chart-812
Arguments must be strings, not bool or number
2022-05-25 07:12:44 -07:00
Kubernetes Prow Robot
332d61dba8 Merge pull request #814 from stephan2012/bugfix/missing-keys-803
Add podAnnotations and podLabels to values and docs
2022-05-24 09:38:06 -07:00
Stephan Austermühle
3cbae5e72b Fix type error for the leader election
Also, add the missing update verb in the ClusterRole and add the required
time units to leaseDuration, renewDeadline, and retryPeriod in the Chart
example.
2022-05-24 18:11:18 +02:00
Stephan Austermühle
d8a609a6e7 Add more precise description 2022-05-24 18:07:58 +02:00
Stephan Austermühle
f0fa4c0cc0 Add podAnnotations and podLabels to values and docs 2022-05-24 10:02:16 +02:00
Kubernetes Prow Robot
e61823c299 Merge pull request #809 from damemi/CVE-2022-27191
bump: golang.org/x/crypto
2022-05-23 21:39:10 -07:00
Mike Dame
14b83e6cc5 bump: golang.org/x/crypto 2022-05-23 21:17:27 +00:00
sunxiaofei
5e3b825427 cleanup lownodeutilization code 2022-05-23 17:20:35 +08:00
Kubernetes Prow Robot
15794ba00d Merge pull request #801 from KohlsTechnology/bump-go-1.18
Bump To Go 1.18.2
2022-05-18 23:54:07 -07:00
Sean Malloy
e494a5817e Bump To Go 1.18.2
The main k/k repo was updated to Go 1.18.2 for the
k8s v1.24.0 release. See below PR for reference.

https://github.com/kubernetes/kubernetes/pull/110044
2022-05-18 09:36:32 -05:00
Kubernetes Prow Robot
eb0be65687 Merge pull request #796 from JaneLiuL/master
Update helm chart version to v0.24
2022-05-16 15:06:19 -07:00
JaneLiuL
64786460cd Update helm chart version to v0.24 2022-05-13 08:27:15 +08:00
Kubernetes Prow Robot
9c110c4004 Merge pull request #791 from JaneLiuL/master
Bump to k8s 1.24.0
2022-05-12 12:06:33 -07:00
Kubernetes Prow Robot
0eddf7f108 Merge pull request #792 from pravarag/update-docs-1.24
Update Docs and Manifests for v0.24.0
2022-05-12 11:31:15 -07:00
Kubernetes Prow Robot
3c8d6c4d53 Merge pull request #795 from damemi/update-e2e
Update e2e test versions
2022-05-12 09:29:14 -07:00
Mike Dame
6e84d0a6ba React to removal of offensive language
https://github.com/kubernetes/kubeadm/issues/2200 went into effect in 1.24, so
e2es broke without the update.
2022-05-12 15:35:07 +00:00
Mike Dame
fb1df468ad golint fix 2022-05-12 14:21:34 +00:00
Mike Dame
ac4d576df8 Update e2e test versions 2022-05-12 14:16:53 +00:00
Pravar Agrawal
314ad65b04 Update docs and manifests for v0.24.0 2022-05-04 22:08:49 +05:30
JaneLiuL
969a618933 Bump to k8s 1.24.0 2022-05-04 10:17:47 +08:00
Kubernetes Prow Robot
028f205e8c Merge pull request #790 from ingvagabund/636
Added request considerations to NodeFit Feature [#636 follow up]
2022-05-03 19:09:16 -07:00
Jan Chaloupka
3eca2782d4 Addressing review comments
Both LowNode and HighNode utilization strategies evict only as many pods
as there are free resources on other nodes. Thus, the resource fit test
is always true by definition.
2022-04-28 18:54:54 +02:00
RyanDevlin
16eb9063b6 NodeFit parameter now considers pod requests 2022-04-28 10:16:52 +02:00
Kubernetes Prow Robot
eac3b4b54a Merge pull request #788 from ryan4yin/master
fix: incorrect yaml indentation in readme
2022-04-26 06:46:53 -07:00
Ryan Yin
d08cea731a fix: incorrect indentation 2022-04-26 06:05:12 +08:00
Kubernetes Prow Robot
0fc5ba9316 Merge pull request #787 from JaneLiuL/master
bump to k8s 1.24-rc.0
2022-04-25 12:05:43 -07:00
JaneLiuL
ecbd10afe2 bump to k8s 1.24-rc.0 2022-04-21 09:11:04 +08:00
Kubernetes Prow Robot
e5ed0540f2 Merge pull request #779 from pravarag/user-docs-typo
Fix missing param in user-guide for PodLifeTime strategy
2022-04-11 01:44:06 -07:00
Pravar Agrawal
4e972a7602 fix missing param in user-guide 2022-04-07 10:02:26 +05:30
Kubernetes Prow Robot
ae20b5b034 Merge pull request #732 from eminaktas/feature/metric-scape
feat: Add metric scrape configs in Helm Chart
2022-03-30 07:06:27 -07:00
Kubernetes Prow Robot
406e3ed5b3 Merge pull request #771 from dineshbhor/fix-highnodeutilization-node-sorting
Sort nodes in ascending order for HighNodeUtilization
2022-03-29 02:58:47 -07:00
dineshbhor
7589aaf00b Sort nodes in ascending order for HighNodeUtilization 2022-03-29 17:54:18 +09:00
eminaktas
ca90b53913 feat: Add metric scrape configs in Helm Chart
Signed-off-by: eminaktas <emin.aktas@trendyol.com>
2022-03-28 23:41:56 +03:00
Kubernetes Prow Robot
238eebeaca Merge pull request #722 from Dentrax/feature/leaderelection
feat(leaderelection): impl leader election for HA Deployment
2022-03-28 09:39:23 -07:00
Kubernetes Prow Robot
cf59d08193 Merge pull request #751 from HelmutLety/redo_#473
feat: Add DeviationThreshold Parameter for LowNodeUtilization (previous attempt: #473)
2022-03-28 03:53:24 -07:00
HelmutLety
2ea65e69dc feat(LowNodeUtilization): useDeviationThresholds, redo of #473
[751]: normalize Percentage in nodeutilization and clean the tests
2022-03-28 12:35:01 +02:00
Kubernetes Prow Robot
7f6a2a69b0 Merge pull request #777 from JacobHenner/support-taint-exclusions
Add RemovePodsViolatingNodeTaints taint exclusion
2022-03-28 02:47:23 -07:00
Jacob Henner
ac3362149b Add RemovePodsViolatingNodeTaints taint exclusion
Add taint exclusion to RemovePodsViolatingNodeTaints. This permits node
taints to be ignored by allowing users to specify ignored taint keys or
ignored taint key=value pairs.
2022-03-27 13:48:40 -04:00
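A rough sketch of the exclusion matching described in the commit above (a taint is ignored either by key or by an exact key=value pair); the helper name and the string form of the exclusions are assumptions:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// taintExcluded reports whether a node taint should be ignored, given
// user-supplied exclusions of the form "key" or "key=value". Hypothetical
// helper illustrating the behaviour described in the commit above.
func taintExcluded(taint v1.Taint, excluded []string) bool {
	for _, e := range excluded {
		if e == taint.Key || e == taint.Key+"="+taint.Value {
			return true
		}
	}
	return false
}

func main() {
	t := v1.Taint{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}
	fmt.Println(taintExcluded(t, []string{"dedicated"}))       // true: matched by key
	fmt.Println(taintExcluded(t, []string{"dedicated=infra"})) // false: value differs
	fmt.Println(taintExcluded(t, []string{"dedicated=gpu"}))   // true: exact key=value match
}
```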
Furkan
0a52af9ab8 feat(leaderelection): impl leader election
Signed-off-by: Furkan <furkan.turkal@trendyol.com>
Signed-off-by: eminaktas <eminaktas34@gmail.com>
Co-authored-by: Emin <emin.aktas@trendyol.com>
Co-authored-by: Yasin <yasintaha.erol@trendyol.com>
2022-03-25 14:33:14 +03:00
Kubernetes Prow Robot
07bbdc61c4 Merge pull request #762 from ingvagabund/nodeutilization-refactor
Promote NodeUsage to NodeInfo, evaluate thresholds separately
2022-03-15 17:33:48 -07:00
Kubernetes Prow Robot
17595fdcfc Merge pull request #764 from ingvagabund/taints-prefer-no-scheduler
RemovePodsViolatingNodeTaints: optionally include PreferNoSchedule taint
2022-03-14 17:36:10 -07:00
Jan Chaloupka
285523f0d9 RemovePodsViolatingNodeTaints: optionally include PreferNoSchedule taint 2022-03-14 16:46:03 +01:00
Kubernetes Prow Robot
c55a897599 Merge pull request #759 from JaneLiuL/master
OWNERS: add janeliul as a reviewer
2022-03-11 10:29:07 -08:00
Jan Chaloupka
52ff50f2d1 Promote NodeUsage to NodeInfo, evaluate thresholds separately 2022-03-11 13:52:37 +01:00
Jan Chaloupka
8ebf3fb323 nodeutilization: move node resource threshold value computation under a separate function 2022-03-11 12:46:11 +01:00
Kubernetes Prow Robot
0e0ae8df90 Merge pull request #761 from ingvagabund/TestTooManyRestarts-II
[e2e] TestTooManyRestarts: check if container status is set before accessing
2022-03-11 02:29:06 -08:00
Jan Chaloupka
bd3daa82d3 [e2e] TestTooManyRestarts: check if container status is set before accessing 2022-03-11 10:35:49 +01:00
Kubernetes Prow Robot
60a15f0392 Merge pull request #760 from ingvagabund/TestTooManyRestarts
[e2e] TestTooManyRestarts: check err and len before accessing pod items
2022-03-11 01:09:07 -08:00
Jan Chaloupka
d98cb84568 [e2e] TestTooManyRestarts: check err and len before accessing pod items 2022-03-11 09:45:05 +01:00
Kubernetes Prow Robot
6ab01eca63 Merge pull request #758 from hiroyaonoe/add-doc-about-max-no-of-pods-to-evict-per-namespace-policy
Update docs for maxNoOfPodsToEvictPerNamespace
2022-03-10 11:25:21 -08:00
Kubernetes Prow Robot
584ac2d604 Merge pull request #757 from prune998/prune/taint-logs
add conflicting taint to the logs
2022-03-10 05:37:35 -08:00
prune
448dc4784c add conflicting taint to the logs
log when counts mismatch

simplified logic to log blocking taints
2022-03-10 08:05:42 -05:00
JaneLiuL
3ca77e7a3d OWNERS: add janeliul as a reviewer 2022-03-08 07:48:11 +08:00
Hiroya Onoe
01e7015b97 Update docs for maxNoOfPodsToEvictPerNamespace 2022-03-07 16:21:04 +09:00
Kubernetes Prow Robot
fd5a8c7d78 Merge pull request #739 from JaneLiuL/master
Share links to all descheduler enhancement proposals in the project repo
2022-03-02 09:55:14 -08:00
Kubernetes Prow Robot
43148ecd0c Merge pull request #740 from JaneLiuL/doc-npd
fix doc about NPD description
2022-03-01 09:59:55 -08:00
Kubernetes Prow Robot
16501978dc Merge pull request #748 from damemi/update-v0.23.1
Update manifests and doc for v0.23.1
2022-03-01 07:47:46 -08:00
Mike Dame
1b4e48b006 Update manifests and doc for v0.23.1 2022-02-28 19:06:50 +00:00
Kubernetes Prow Robot
da6a3e063f Merge pull request #744 from antonio-te/master
Update golang image
2022-02-28 10:41:46 -08:00
Antonio Gurgel
5784c0cc04 Update golang image
1.17.3 is affected by CVE-2021-44716.
2022-02-28 07:22:26 -08:00
JaneLiuL
254a3a9ec1 Share links to all descheduler enhancement proposals in the project repository 2022-02-26 12:27:35 +08:00
JaneLiuL
328c695141 fix doc about NPD description 2022-02-26 12:23:33 +08:00
Kubernetes Prow Robot
3ab0268c5a Merge pull request #733 from JaneLiuL/master
remove MostRequestedPriority from doc since already deprecated
2022-02-24 04:32:32 -08:00
Jane Liu L
cd8dbdd1e2 remove MostRequestedPriority from doc since already deprecated 2022-02-24 09:00:36 +08:00
Kubernetes Prow Robot
54c50c5390 Merge pull request #731 from jklaw90/fix-ctx-cron
Bugfix: Cronjob ctx cancel
2022-02-22 11:35:18 -08:00
Julian Lawrence
a2cbc25397 updated to handle cronjob flow 2022-02-22 08:52:06 -08:00
Kubernetes Prow Robot
bd81f6436e Merge pull request #708 from damemi/utilization-values-readme
Clarify resource calculations in NodeUtilization strategy Readmes
2022-02-22 04:47:46 -08:00
Kubernetes Prow Robot
30be19b04e Merge pull request #715 from eminaktas/values-fix
fix: Remove deprecated parameters from cmdOptions and add the parameters under policy
2022-02-18 05:08:23 -08:00
Kubernetes Prow Robot
3c251fb09d Merge pull request #726 from jklaw90/log-eviction-node
Eviction Logs
2022-02-15 04:08:03 -08:00
Julian Lawrence
224e2b078f updated logs to help with debugging 2022-02-14 18:27:53 -08:00
Kubernetes Prow Robot
dd80d60f4f Merge pull request #716 from eminaktas/imagepullsecret
fix: add imagePullSecrets for deployment resource
2022-02-14 05:27:29 -08:00
Kubernetes Prow Robot
e88837a349 Merge pull request #704 from ingvagabund/update-chart-readme
Update charts README to reflect the new parameters
2022-02-11 14:23:46 -08:00
Kubernetes Prow Robot
5901f8af1b Merge pull request #697 from a7i/code-reviewer
OWNERS: add a7i as a reviewer
2022-02-11 08:14:23 -08:00
Kubernetes Prow Robot
0d1704a192 Merge pull request #717 from JaneLiuL/release-1.23.1
[release-1.23.1] Update helm chart version to v0.23.1
2022-02-08 04:34:54 -08:00
JaneLiuL
c5878b18c6 Update helm chart version to v0.23.1 2022-02-08 20:21:57 +08:00
emin.aktas
ff1954b32e fix: add imagePullSecrets for deployment resource
Signed-off-by: emin.aktas <eminaktas34@gmail.com>
Co-authored-by: yasintahaerol <yasintahaerol@gmail.com>
Co-authored-by: Dentrax <furkan.turkal@trendyol.com>
2022-02-07 18:05:18 +03:00
emin.aktas
4c8040bbaf fix: Remove deprecated parameters from cmdOptions and add the parameters under policy 2022-02-07 15:14:55 +03:00
Kubernetes Prow Robot
deaa314492 Merge pull request #712 from JaneLiuL/helm
fix helmchart fail to watch namespace issue
2022-02-06 10:36:51 -08:00
Jane Liu L
9c653a2274 fix helmchart fail to watch namespace issue 2022-02-04 18:34:21 +08:00
Kubernetes Prow Robot
8d37557743 Merge pull request #709 from damemi/update-helm-23
Update helm chart version to v0.23
2022-02-03 12:10:58 -08:00
Mike Dame
5081ad84b5 Update helm chart version to v0.23 2022-02-03 14:57:18 -05:00
Mike Dame
c51c066cd1 Clarify resource calculations in NodeUtilization strategy Readmes
This adds text explaining the resource calculation in LowNodeUtilization and HighNodeUtilization
2022-01-30 12:59:47 -05:00
Kubernetes Prow Robot
afb1d75ce1 Merge pull request #660 from martin-magakian/features/add_affinity_option
Adding 'affinity' support to run 'descheduler' in CronJob or Deployment
2022-01-27 05:56:27 -08:00
Jan Chaloupka
90e6174fdd Update charts README to reflect the new parameters 2022-01-27 14:46:15 +01:00
Kubernetes Prow Robot
8e3ef9a6b3 Merge pull request #694 from sharkannon/master
Updates to include annotations to the service account
2022-01-27 05:42:26 -08:00
Kubernetes Prow Robot
778a18c550 Merge pull request #700 from jklaw90/root-ctx
Use the root context cancellation
2022-01-27 05:08:25 -08:00
Julian Lawrence
1a98a566b3 adding cancellation on SIGINT/SIGTERM 2022-01-25 00:10:09 -08:00
Kubernetes Prow Robot
a643c619c9 Merge pull request #699 from ingvagabund/evict-pods-report-metrics-indendent-of-the-dry-mode
Evictor: report successful eviction independently of the dry-mode
2022-01-20 14:16:29 -08:00
Jan Chaloupka
203388ff1a Evictor: report successful eviction independently of the dry-mode
Dry mode currently does not report metrics when the eviction succeeds
2022-01-20 21:23:19 +01:00
Kubernetes Prow Robot
2844f80a35 Merge pull request #677 from ingvagabund/accumulated-eviction
Use a fake client when evicting pods by individual strategies to accumulate the evictions
2022-01-20 08:15:52 -08:00
Jan Chaloupka
901a16ecbc Do not collect the metrics when the metrics server is not enabled 2022-01-20 17:04:15 +01:00
Jan Chaloupka
e0f086ff85 Use a fake client when evicting pods by individual strategies to accumulate the evictions
Currently, when the descheduler is running with --dry-run on, no strategy actually
evicts a pod, so every strategy always starts with a complete list of
pods. E.g. when the PodLifeTime strategy evicts a few pods, the RemoveDuplicatePods
strategy still takes into account even the pods eliminated by the PodLifeTime
strategy. This does not correspond to real-world scenarios, as the
same pod can be evicted multiple times. Instead, use a fake client and
evict/delete the pods from its cache so the strategies evict each pod
at most once, as would normally happen in a real cluster.
2022-01-20 17:04:05 +01:00
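The dry-run technique described in the commit above (delete "evicted" pods from a fake client's cache so later strategies no longer see them) can be illustrated with client-go's fake clientset; this is a sketch of the idea, not the descheduler's implementation:

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed a fake clientset with two pods; in dry-run mode an eviction is
	// simulated by deleting the pod from the fake cache, so a later strategy
	// listing pods no longer sees it.
	client := fake.NewSimpleClientset(
		&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "old-pod", Namespace: "default"}},
		&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "young-pod", Namespace: "default"}},
	)
	ctx := context.TODO()

	// Strategy A (e.g. PodLifeTime) "evicts" old-pod in dry-run.
	_ = client.CoreV1().Pods("default").Delete(ctx, "old-pod", metav1.DeleteOptions{})

	// Strategy B now only sees the remaining pod, so the same pod is not
	// counted as evicted more than once.
	pods, _ := client.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
	for _, p := range pods.Items {
		fmt.Println("still present:", p.Name)
	}
}
```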
Amir Alavi
0251935268 OWNERS: add a7i as a reviewer 2022-01-18 09:14:44 -05:00
Stephen Herd
8752a28025 Merge branch 'kubernetes-sigs-master' 2022-01-13 12:52:36 -08:00
Stephen Herd
24884c7568 Rebase from master 2022-01-13 12:52:06 -08:00
Kubernetes Prow Robot
175f648045 Merge pull request #695 from a7i/liveness-template
make livenessprobe consistent across manifests
2022-01-12 13:37:40 -08:00
Amir Alavi
f50a3fa119 make livenessprobe consistent across manifests; make helm chart configurable via values.yaml 2022-01-12 11:49:17 -05:00
Kubernetes Prow Robot
551eced42a Merge pull request #688 from babygoat/evict-failed-without-ownerrefs
feat: support eviction of failed bare pods
2022-01-11 12:31:15 -08:00
Stephen Herd
3635a8171c Updates to include annotations to the service account, needed for things such as Workload Identity in Google Cloud 2022-01-11 11:55:05 -08:00
Kubernetes Prow Robot
796f347305 Merge pull request #692 from jklaw90/sliding-until
NonSlidingUntil for deployment
2022-01-11 06:21:16 -08:00
Kubernetes Prow Robot
13abbe7f09 Merge pull request #693 from developer-guy/patch-1
Update NOTES.txt
2022-01-10 05:11:13 -08:00
Kubernetes Prow Robot
e4df54d2d1 Merge pull request #685 from JaneLiuL/master
add liveness probe
2022-01-10 04:29:12 -08:00
Jane Liu L
c38f617e40 add liveness probe 2022-01-10 09:56:53 +08:00
Kubernetes Prow Robot
e6551564c4 Merge pull request #691 from RyanDevlin/waitForNodes
Eliminated race condition in E2E tests
2022-01-07 06:16:30 -08:00
Batuhan Apaydın
3a991dd50c Update NOTES.txt
Signed-off-by: Batuhan Apaydın <batuhan.apaydin@trendyol.com>
Co-authored-by: Furkan Türkal <furkan.turkal@trendyol.com>
Co-authored-by: Emin Aktaş <emin.aktas@trendyol.com>
Co-authored-by: Necatican Yıldırım <necatican.yildirim@trendyol.com>
Co-authored-by: Fatih Sarhan <fatih.sarhan@trendyol.com>
2022-01-07 13:42:00 +03:00
Julian Lawrence
77cb406052 updated until -> sliding until 2022-01-06 12:55:10 -08:00
RyanDevlin
921a5680ab Eliminated race condition in E2E tests 2022-01-06 09:36:13 -05:00
babygoat
1529180d70 feat: support eviction of failed bare pods
This patch adds a policy (evictFailedBarePods) to allow failed
pods without ownerReferences to be evicted. For backward compatibility,
the policy is disabled by default. Addresses #644.
2022-01-06 01:07:41 +08:00
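A minimal sketch of the condition described in the commit above: a Failed pod with no ownerReferences is only considered evictable when the evictFailedBarePods policy is enabled. The helper name is hypothetical:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// evictableAsFailedBarePod reports whether the evictFailedBarePods policy
// allows evicting this pod: it must be in Failed phase, have no
// ownerReferences, and the policy flag must be enabled (off by default).
// Illustrative helper, not descheduler code.
func evictableAsFailedBarePod(pod *v1.Pod, evictFailedBarePods bool) bool {
	return evictFailedBarePods &&
		pod.Status.Phase == v1.PodFailed &&
		len(pod.OwnerReferences) == 0
}

func main() {
	failedBare := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodFailed}}
	fmt.Println(evictableAsFailedBarePod(failedBare, false)) // false: policy disabled by default
	fmt.Println(evictableAsFailedBarePod(failedBare, true))  // true: policy opted in
}
```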
Kubernetes Prow Robot
2d9143d129 Merge pull request #687 from jklaw90/error-comment
Comment update for metrics
2022-01-04 06:52:52 -08:00
Kubernetes Prow Robot
e9c0833b6f Merge pull request #689 from ingvagabund/run-hack-update-generated-conversions-sh
run ./hack/update-* scripts
2022-01-04 06:34:52 -08:00
Jan Chaloupka
8462cf56d7 run ./hack/update-* scripts 2022-01-04 09:37:01 +01:00
Julian Lawrence
a60d6a527d updated comment to reflect actual value 2021-12-29 10:56:10 -08:00
Kubernetes Prow Robot
2b23694704 Merge pull request #682 from jklaw90/chart-labels
commonLabels value for chart
2021-12-26 06:15:15 -08:00
Julian Lawrence
d0a95bee2f fixed default value for common labels 2021-12-20 08:24:30 -08:00
Julian Lawrence
57a910f5d1 adding commonLabels value 2021-12-18 23:31:52 -08:00
Kubernetes Prow Robot
ccaedde183 Merge pull request #661 from kirecek/enhc/include-pod-reason
Add pod.Status.Reason to the list of reasons
2021-12-17 13:11:55 -08:00
Erik Jankovič
2020642b6f chore: add pod.Status.Reason to the list of reasons
Signed-off-by: Erik Jankovič <erik.jankovic@gmail.com>
2021-12-17 18:37:53 +01:00
Kubernetes Prow Robot
96ff5d2dd9 Merge pull request #680 from ingvagabund/klog-output-stdout
Set the klog output to stdout by default
2021-12-16 05:31:18 -08:00
Jan Chaloupka
d8718d7db3 Set the klog output to stdout by default
Also, one needs to set --logtostderr=false to properly log to stdout
2021-12-16 11:22:40 +01:00
Kubernetes Prow Robot
1e5165ba9f Merge pull request #670 from autumn0207/improve_pod_eviction_metrics
Add node name label to the counter metric for evicted pods
2021-12-16 01:49:18 -08:00
autumn0207
8e74f8bd77 improve pod eviction metrics 2021-12-16 17:06:22 +08:00
Kubernetes Prow Robot
2424928019 Merge pull request #667 from damemi/1.23-rc.0
bump: k8s to 1.23
2021-12-15 06:56:20 -08:00
Jan Chaloupka
e6314d2c7e Init the klog directly
Since 3948cb8d1b (diff-465167b08358906be13f9641d4798c6e8ad0790395e045af8ace4d08223fa922R78)
the klog verbosity level always gets overridden.
2021-12-15 09:23:20 -05:00
Kubernetes Prow Robot
271ee3c7e3 Merge pull request #678 from a7i/golangci-fix
fix: install golangci from the golangci repo
2021-12-15 02:20:19 -08:00
Amir Alavi
e58686c142 fix: install golangci from the golangci repo 2021-12-14 13:18:19 -05:00
Kubernetes Prow Robot
0b2c10d6ce Merge pull request #673 from Garrybest/pr_pod_cache
list pods assigned to a node by pod informer cache
2021-12-14 01:32:04 -08:00
Garrybest
cac3b9185b reform all test files
Signed-off-by: Garrybest <garrybest@foxmail.com>
2021-12-11 19:43:16 +08:00
Mike Dame
94888e653c Move klog initialization to cli.Run() 2021-12-10 12:00:11 -05:00
Mike Dame
936578b238 Update k8s version in helm test 2021-12-10 10:14:47 -05:00
Mike Dame
4fa7bf978c run hack/update-generated-deep-copies.sh 2021-12-10 10:02:39 -05:00
Mike Dame
2f7c496944 React to 1.23 bump
Logging validation functions changed in upstream commit
54ecfcdac8.
This uses the new function name.
2021-12-10 10:02:26 -05:00
Mike Dame
5fe3ca86ff bump: k8s to 1.23 2021-12-10 10:02:14 -05:00
Garrybest
0ff8ecb41e reform all strategies by using getPodsAssignedToNode
Signed-off-by: Garrybest <garrybest@foxmail.com>
2021-12-10 19:28:51 +08:00
Garrybest
08ed129a07 reform ListPodsOnANode by using pod informer and indexer
Signed-off-by: Garrybest <garrybest@foxmail.com>
2021-12-10 19:25:20 +08:00
Kubernetes Prow Robot
49ad197dfc Merge pull request #658 from JaneLiuL/master
Add maxNoOfPodsToEvictPerNamespace policy
2021-12-03 01:50:27 -08:00
Kubernetes Prow Robot
82201d0e48 Add maxNoOfPodsToEvictPerNamespace policy 2021-12-03 10:58:37 +08:00
Kubernetes Prow Robot
2b95332e8c Merge pull request #665 from spiffxp/use-k8s-infra-for-gcb-image
images: use k8s-staging-test-infra/gcb-docker-gcloud
2021-11-30 13:59:01 -08:00
Aaron Crickenberger
e8ed62e540 images: use k8s-staging-test-infra/gcb-docker-gcloud 2021-11-30 13:12:18 -08:00
Kubernetes Prow Robot
e5725de7bb Merge pull request #664 from stpabhi/dev
fix typo minPodLifeTimeSeconds
2021-11-30 08:10:56 -08:00
Abhilash Pallerlamudi
c47e811937 fix typo minPodLifeTimeSeconds
Signed-off-by: Abhilash Pallerlamudi <stp.abhi@gmail.com>
2021-11-29 17:51:40 -08:00
Kubernetes Prow Robot
e0bac4c371 Merge pull request #662 from ingvagabund/drop-deprecated-flags
Drop deprecated flags
2021-11-29 08:43:23 -08:00
Jan Chaloupka
73a7adf572 Drop deprecated flags 2021-11-29 17:12:59 +01:00
Kubernetes Prow Robot
5cf381a817 Merge pull request #663 from ingvagabund/bump-go-to-1.17
Bump go version in go.mod to go1.17
2021-11-29 08:01:23 -08:00
Jan Chaloupka
4603182320 Bump go version in go.mod to go1.17 2021-11-29 16:49:35 +01:00
Martin Magakian
ad207775ff Adding 'affinity' support to run 'descheduler' in CronJob or Deployment 2021-11-18 11:08:36 +01:00
Kubernetes Prow Robot
d0f11a41c0 Merge pull request #639 from JaneLiuL/master
Ignore Pods With Deletion Timestamp
2021-11-15 08:44:49 -08:00
Jane Liu L
c7524705b3 Ignore Pods With Deletion Timestamp 2021-11-10 09:32:11 +08:00
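The check behind the commit above is whether the pod's deletionTimestamp is already set; a minimal illustration using the core v1 types (not the descheduler's actual filter):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// beingDeleted reports whether a pod is already terminating, i.e. the API
// server has set its deletionTimestamp; such pods can be skipped rather
// than evicted again (illustrative helper).
func beingDeleted(pod *v1.Pod) bool {
	return pod.DeletionTimestamp != nil
}

func main() {
	terminating := &v1.Pod{}
	now := metav1.Now()
	terminating.DeletionTimestamp = &now
	fmt.Println(beingDeleted(terminating)) // true
	fmt.Println(beingDeleted(&v1.Pod{}))   // false
}
```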
Kubernetes Prow Robot
50f9513cbb Merge pull request #642 from wking/clarify-RemovePodsHavingTooManyRestarts
README: Clarify podRestartThreshold applying to the sum over containers
2021-10-13 03:15:49 -07:00
W. Trevor King
6fd80ba29c README: Clarify podRestartThreshold applying to the sum over containers
calcContainerRestarts sums over containers.  The new language makes
that clear, avoiding potential confusion vs. an alternative that looked
for pods where a single container had passed the configured threshold.
For example, with three containers with 50 restarts and a threshold of
100, the actual "sum over containers" logic makes that pod a candidate
for descheduling, but the "largest single container restart count"
hypothetical would not have made it a candidate.

Also shifts labelSelector into the parameter table, because when it
was added in 29ade13ce7 (README and e2e-testcase add for
labelSelector, 2021-03-02, #510), it landed a few lines too high.
2021-10-07 14:51:26 -07:00
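The sum-over-containers semantics clarified above can be shown with a tiny helper, using the numbers from the commit message; the helper below is illustrative, not calcContainerRestarts itself:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// totalRestarts sums restart counts across all containers of a pod, which is
// the quantity compared against podRestartThreshold (illustrative helper).
func totalRestarts(pod *v1.Pod) int32 {
	var sum int32
	for _, cs := range pod.Status.ContainerStatuses {
		sum += cs.RestartCount
	}
	return sum
}

func main() {
	// Three containers with 50 restarts each: the sum (150) crosses a
	// threshold of 100 even though no single container does.
	pod := &v1.Pod{Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{
		{RestartCount: 50}, {RestartCount: 50}, {RestartCount: 50},
	}}}
	fmt.Println(totalRestarts(pod) > 100) // true
}
```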
Kubernetes Prow Robot
5b557941fa Merge pull request #627 from JaneLiuL/master
Add E2E test case to cover duplicatepods strategy
2021-10-01 00:01:22 -07:00
Kubernetes Prow Robot
c6229934a0 Merge pull request #637 from KohlsTechnology/helm-suspend-docs
Document suspend helm chart configuration option
2021-09-30 23:47:22 -07:00
Sean Malloy
ed28eaeccc Document suspend helm chart configuration option 2021-09-30 23:30:10 -05:00
Kubernetes Prow Robot
3be910c238 Merge pull request #621 from uthark/oatamanenko/deleted
Ignore pods being deleted
2021-09-30 21:21:22 -07:00
Kubernetes Prow Robot
d96dd6da2d Merge pull request #632 from a7i/amir/failedpods-crash
RemoveFailedPods: guard against nil descheduler strategy (e.g. in case of default that loads all strategies)
2021-09-29 01:02:49 -07:00
Amir Alavi
f7c26ef41f e2e tests for RemoveFailedPods strategy
Fix priority class default
2021-09-26 20:39:32 -04:00
Jane Liu L
57ad9cc91b Add E2E test case to cover tooManyRestarts strategy 2021-09-26 09:10:17 +08:00
Kubernetes Prow Robot
926339594d Merge pull request #622 from yutachaos/feature/added_suspend_parameter
Added support for cronjob suspend
2021-09-22 09:18:01 -07:00
Amir Alavi
1ba53ad68c e2e TestTopologySpreadConstraint: ensure pods are running before checking for topology spread across domains 2021-09-20 18:18:47 -04:00
Amir Alavi
6eb37ce079 RemoveFailedPods: guard against nil descheduler strategy (e.g. in case of default that loads all strategies) 2021-09-20 11:20:54 -04:00
Kubernetes Prow Robot
54d660eee0 Merge pull request #629 from chenkaiyue/fix-node-affinity-test
fix duplicate code in node_affinity_test.go
2021-09-16 01:59:46 -07:00
yutachaos
cf219fbfae Added helm chart suspend parameter
Signed-off-by: yutachaos <18604471+yutachaos@users.noreply.github.com>
2021-09-16 14:32:09 +09:00
kaiyuechen
d1d9ea0c48 fix duplicate code in node_affinity_test.go 2021-09-16 10:39:52 +08:00
Oleg Atamanenko
4448d9c670 Ignore pods being deleted 2021-09-15 00:05:51 -07:00
Kubernetes Prow Robot
3909f3acae Merge pull request #623 from damemi/release-1.22
Update Helm chart version to 0.22.0
2021-09-08 13:13:56 -07:00
Mike Dame
9f1274f3ab Update Helm chart version to 0.22.0 2021-09-08 16:00:56 -04:00
Kubernetes Prow Robot
e6926e11ea Merge pull request #617 from KohlsTechnology/docs-1.22
Update Docs and Manifests for v0.22.0
2021-08-31 09:31:37 -07:00
Sean Malloy
16228c9dd1 Update Docs and Manifests for v0.22.0
* Added v0.22 references to README
* Update k8s manifests with v0.22.0 references
* Added table with list of supported architectures by release
2021-08-31 00:24:18 -05:00
Kubernetes Prow Robot
57dabbca5c Merge pull request #597 from a7i/amir/e2e-topology
Add e2e tests for TopologySpreadConstraint
2021-08-30 22:00:28 -07:00
Kubernetes Prow Robot
04439c6e64 Merge pull request #610 from a7i/failed-pods
Introduce RemoveFailedPods strategy
2021-08-30 11:30:09 -07:00
Amir Alavi
0e0e688fe8 Introduce RemoveFailedPods strategy 2021-08-30 14:17:52 -04:00
Kubernetes Prow Robot
0603de4353 Merge pull request #613 from derdanne/derdanne/highnodeutilization-readme-591
nodeutilization strategy: "numberOfNodes" should work as documented
2021-08-24 08:29:14 -07:00
Kubernetes Prow Robot
ea911db6dc Merge pull request #615 from KohlsTechnology/bump-1.22
Bump To k8s 1.22.0
2021-08-19 07:07:24 -07:00
Sean Malloy
c079c7aaae Bump To k8s 1.22.0 2021-08-19 00:40:38 -05:00
Kubernetes Prow Robot
5420988a28 Merge pull request #614 from KohlsTechnology/bump-go-version
Bump To Go 1.16.7
2021-08-18 02:10:08 -07:00
Sean Malloy
b56a1ab80a Bump To Go 1.16.7
The main k/k repo was updated to Go 1.16.7 for k8s
v1.22.0 release. See below PR for reference.

https://github.com/kubernetes/kubernetes/pull/104200
2021-08-17 23:47:45 -05:00
Daniel Klockenkämper
a6b34c1130 nodeutilization strategy: "numberOfNodes" should work as documented 2021-08-17 13:29:07 +00:00
Amir Alavi
0de8002b7d Update gce scripts to spread nodes over 2 zones 2021-08-16 22:41:37 -04:00
Amir Alavi
84d648ff60 Add e2e tests for TopologySpreadConstraint 2021-08-16 22:39:31 -04:00
Kubernetes Prow Robot
6ad6f6fce5 Merge pull request #592 from chrisjohnson00/issue-591
docs: adding clarification to HighNodeUtilization's purpose
2021-08-12 21:38:22 -07:00
Kubernetes Prow Robot
b7100ad871 Merge pull request #582 from praxist/helm-deployment
Update helm chart for running as deployment
2021-08-12 07:29:47 -07:00
Matthew Leung
38c0f1c639 Update helm chart for running as deployment 2021-08-11 11:56:14 -07:00
Kubernetes Prow Robot
64d7901d82 Merge pull request #602 from a7i/balance-domains-aboveavg
TopologySpreadConstraint: advance above avg index when at ideal average
2021-08-10 05:47:25 -07:00
Kubernetes Prow Robot
ab1015e5fa Merge pull request #599 from a7i/amir/update-gce-images
Update GCE images to Ubuntu 18.04 LTS
2021-08-10 05:47:18 -07:00
Kubernetes Prow Robot
1753bf4422 Merge pull request #611 from a7i/docs-remove-redundant-strategies
Update README to remove redundant list of strategies
2021-08-02 08:11:22 -07:00
Amir Alavi
7cb44dca27 Update README to remove redundant list of strategies 2021-07-30 16:35:00 -04:00
Amir Alavi
2ec4b8b674 Update GCE images to Ubuntu 18.04 LTS 2021-07-30 16:14:20 -04:00
Kubernetes Prow Robot
0d0633488d Merge pull request #598 from jayonlau/clenup
Clean up extra spaces
2021-07-30 11:11:37 -07:00
Kubernetes Prow Robot
f3b3853d9d Merge pull request #562 from wsscc2021/helm-chart-tolerations
Add tolerations to cronjob in helm chart
2021-07-30 10:43:38 -07:00
Kubernetes Prow Robot
e550e5e22a Merge pull request #600 from a7i/topology-example
Add example for RemovePodsViolatingTopologySpreadConstraint
2021-07-28 05:37:34 -07:00
Kubernetes Prow Robot
2bf37ff495 Merge pull request #608 from a7i/master
Remove kind binary from repo
2021-07-28 01:13:35 -07:00
Amir Alavi
fa84ec6774 Remove kind binary from repo and add to gitignore 2021-07-27 08:28:55 -04:00
Kubernetes Prow Robot
e18e0416b1 Merge pull request #607 from a7i/fix-helm-test
Place bash shebang at the top of the script + Ensure Helm installed for run-helm-tests
2021-07-27 00:40:45 -07:00
Amir Alavi
34282162f8 Wait for job to start/finish in helm-test script 2021-07-26 23:53:36 -04:00
Amir Alavi
7a043d31be Ensure helm is installed and move shebang to top of file 2021-07-26 16:16:20 -04:00
Amir Alavi
b0e5d64bd7 TopologySpreadConstraint: advance above avg index when at ideal average when balancing domains 2021-07-13 22:55:10 -04:00
Amir Alavi
1c9ac2daee Add example for RemovePodsViolatingTopologySpreadConstraint 2021-07-09 22:56:59 -04:00
jayonlau
c6b67e8a6f Clean up extra spaces
Clean up extra spaces. Although these errors are not important, they affect the code style.
2021-07-09 15:40:35 +08:00
Kubernetes Prow Robot
2e4873d103 Merge pull request #594 from ikarldasan/master-to-main
Rename master to main
2021-07-06 05:50:18 -07:00
Karl Dasan
3e483c4d85 Rename master to main 2021-07-01 13:48:50 +05:30
Kubernetes Prow Robot
032ea70380 Merge pull request #549 from pravarag/verify-defaulters-gen
Add verify scripts for defaulters generator
2021-06-30 04:05:03 -07:00
Pravar Agrawal
df84dc4548 add verify script for defaulters gen 2021-06-30 09:41:43 +05:30
Chris Johnson
d4fa83f8bc chore: PR feedback 2021-06-25 12:30:28 -07:00
Chris Johnson
448dbceadd docs: adding clarification to HighNodeUtilization's purpose 2021-06-24 16:20:21 -07:00
Kubernetes Prow Robot
b83b064992 Merge pull request #589 from a7i/remove-kind-bin
Remove kind binary from repo
2021-06-16 00:19:59 -07:00
Amir Alavi
133a0049e3 Remove kind binary from repo 2021-06-15 18:29:06 -04:00
Kubernetes Prow Robot
50b1c1337d Merge pull request #561 from mekza/support_image_pull_secrets
Add support for private registry creds
2021-06-10 23:28:58 -07:00
Martin-Zack Mekkaoui
d5deed44ca Add support for private registry creds 2021-06-09 22:11:37 +02:00
Kubernetes Prow Robot
0f785b9530 Merge pull request #584 from damemi/update-go-badge
Update Go report card badge
2021-06-08 10:48:08 -07:00
Mike Dame
eb1f0ecc14 Update Go report card badge 2021-06-08 13:39:53 -04:00
Kubernetes Prow Robot
b59995eeb8 Merge pull request #583 from ingvagabund/highnodeutil-nodefit
HighNodeUtilization: add NodeFit feature
2021-06-08 08:15:13 -07:00
Jan Chaloupka
d998d82357 HighNodeUtilization: add NodeFit feature 2021-06-08 16:59:43 +02:00
Kubernetes Prow Robot
f51ea72ce0 Merge pull request #577 from a7i/amira/cronjob-ga
Use stable batch/v1 API Group for Kubernetes 1.21
2021-06-08 06:03:13 -07:00
Kubernetes Prow Robot
fe8d4c0d21 Merge pull request #572 from audip/feature/add-deployment-k8s-yaml-files
Add run descheduler as deployment
2021-06-08 05:47:14 -07:00
Kubernetes Prow Robot
3843a2d5d1 Merge pull request #550 from hanumanthan/highnodeutilisation
Highnodeutilization strategy
2021-06-08 01:59:12 -07:00
Kubernetes Prow Robot
839a237d6a Merge pull request #581 from jsravn/patch-1
Remove namespace from ClusterRoleBinding
2021-06-07 07:16:40 -07:00
Hanu
4cd1e66ef3 Adding highnodeutilization strategy 2021-06-06 18:01:42 +08:00
Hanu
2f18864fa5 Refactor - Modify the common functions to be used by high utilisation 2021-06-06 18:00:43 +08:00
Hanu
6e71068f4f Refactoring lownodeutilization - extracting common functions 2021-06-06 18:00:29 +08:00
James Ravn
e40620effa Remove namespace from ClusterRoleBinding
It's not namespace scoped. This breaks some tools like kpt.
2021-06-04 11:04:14 +01:00
Kubernetes Prow Robot
d7dc0abd7b Merge pull request #576 from a7i/amira/topology-spread-label-filter
Filter pods by labelSelector during eviction for TopologySpreadConstraint strategy
2021-06-01 23:38:37 -07:00
Amir Alavi
012ca2398f Filter pods by labelSelector during eviction for TopologySpreadConstraint strategy 2021-06-01 15:42:23 -04:00
Amir Alavi
f07089d7b3 Bump Helm Chart, kind, and Kubernetes version for helm-test 2021-06-01 12:44:05 -04:00
Amir Alavi
a54b59f208 Use stable batch/v1 API Group for Kubernetes 1.21 2021-06-01 12:44:05 -04:00
Kubernetes Prow Robot
bfd5feaf60 Merge pull request #559 from RyanDevlin/nodeFit
Working nodeFit feature
2021-05-31 09:06:26 -07:00
RyanDevlin
41d46d0d3b Working nodeFit feature 2021-05-24 09:03:38 -04:00
Aditya Purandare
646c13ae15 Fix grammar and indentation issue for deployment resource 2021-05-21 12:42:51 -07:00
Kubernetes Prow Robot
3b9d3d9719 Merge pull request #563 from ingvagabund/removeduplicates-take-taints-into-account-for-node-count
RemoveDuplicates: take node taints, node affinity and node selector into account when computing the number of feasible nodes for the average occurrence of pods per node
2021-05-21 05:38:46 -07:00
Aditya Purandare
449383caa3 Add run descheduler as deployment files and update README 2021-05-20 17:04:05 -07:00
Kubernetes Prow Robot
31fd097c0a Merge pull request #527 from pravarag/add-helm-test
Add helm test
2021-05-19 06:10:51 -07:00
Kubernetes Prow Robot
11143d5b2c Merge pull request #570 from KohlsTechnology/bump-kind-version
Bump kind version
2021-05-19 05:40:51 -07:00
Sean Malloy
8480e03e9c Fail unit and e2e tests on any errors 2021-05-19 00:35:16 -05:00
Sean Malloy
0397425010 Bump Kind To v0.11.0
This is required for running e2e tests for k8s v1.21.
2021-05-18 23:37:10 -05:00
Jan Chaloupka
5396282e3d RemoveDuplicates: take node taints, node affinity and node selector into account when computing the number of feasible nodes for the average occurrence of pods per node
Nodes with taints which are not tolerated by evicted pods will never run the
pods. The same holds for node affinity and node selector.
So increase the number of pods per feasible node to decrease the
number of evicted pods.
2021-05-18 16:13:24 +02:00
Kubernetes Prow Robot
a9ff644de6 Merge pull request #568 from BinacsLee/binacs-pkg-descheduler-descheduler_test-fix-errorhandling
Add verify script for govet & fix pkg/descheduler/descheduler_test.go
2021-05-17 08:18:23 -07:00
BinacsLee
fe8e17f72c fix staticcheck failure for pkg/descheduler/descheduler_test.go 2021-05-17 23:07:12 +08:00
Kubernetes Prow Robot
a1709e9edd Merge pull request #567 from a7i/topology-taint-toleration
RemovePodsViolatingTopologySpreadConstraint : Take node's taints into consideration when balancing domains
2021-05-14 12:41:58 -07:00
Amir Alavi
24c0ca2ef9 Take node's taints into consideration when balancing domains 2021-05-14 15:23:58 -04:00
Kubernetes Prow Robot
9b26abd538 Merge pull request #565 from damemi/issue-564
Add test cases for soft constraints/multi constraints
2021-05-12 19:14:24 -07:00
Mike Dame
fc83c13166 Add test cases for soft constraints/multi constraints 2021-05-12 08:36:07 -04:00
Kubernetes Prow Robot
9b69962053 Merge pull request #535 from ingvagabund/e2e-refactor
E2e refactor
2021-05-11 06:33:36 -07:00
Jan Chaloupka
4edbecc85d Define NodeSelectorsEqual predicate 2021-05-09 18:08:32 +02:00
Jan Chaloupka
54f67266bb Define TolerationsEqual 2021-05-09 18:08:27 +02:00
Donggeun Lee
4ba48b018c Add tolerations to cronjob in helm chart 2021-05-04 19:56:08 +09:00
Kubernetes Prow Robot
2a3529c543 Merge pull request #560 from damemi/update-verify-messages
Update error messages in verify scripts to be more informative
2021-04-30 20:05:59 -07:00
Mike Dame
58408d710b Update error messages in verify scripts to be more informative 2021-04-30 16:56:16 -04:00
Kubernetes Prow Robot
161f66a12f Merge pull request #558 from KohlsTechnology/structured-logging
Use Structured Logging For Unknown Strategy Log Message
2021-04-28 23:28:50 -07:00
Sean Malloy
6bde95c9a1 Use Structured Logging For Unknown Strategy Log Message
Always use structured logging. Therefore update klog.Errorf() to instead
use klog.ErrorS().

Here is an example of the new log message.

E0428 23:58:57.048912 586 descheduler.go:145] "skipping strategy" err="unknown strategy name" strategy=ASDFPodLifeTime
2021-04-29 00:00:07 -05:00
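The klog.Errorf to klog.ErrorS switch mentioned above uses klog's structured-logging call shape; a minimal sketch producing a log line similar to the example (the message and keys mirror that example, but the call site here is illustrative, not the actual descheduler code):

```go
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	defer klog.Flush()
	// Structured form: an error value, a constant message, then key/value pairs.
	err := errors.New("unknown strategy name")
	klog.ErrorS(err, "skipping strategy", "strategy", "ASDFPodLifeTime")
}
```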
Kubernetes Prow Robot
724ff8a188 Merge pull request #556 from damemi/change-main-loop
Invert main strategy loop for performance and customizability
2021-04-28 07:52:50 -07:00
Mike Dame
feae158a50 Invert main strategy loop for performance and customizability 2021-04-28 10:36:02 -04:00
Kubernetes Prow Robot
780ac7a51e Merge pull request #554 from BinacsLee/binacs-utils-predicates-cleanup
code cleanup: remove check on length
2021-04-26 07:19:01 -07:00
BinacsLee
c4afb6bb30 code cleanup: remove check on length 2021-04-25 21:44:20 +08:00
Pravar Agrawal
8b5c4e805d update docs with helm test info 2021-04-15 15:56:42 +05:30
Jan Chaloupka
f4e24a408f Drop klog 2021-04-14 09:03:38 +02:00
Jan Chaloupka
2781106d49 TestEvictAnnotation: replace LowNodeUtilization strategy with PodLifetime
PodLifetime is simpler for validating results
2021-04-14 09:03:31 +02:00
Jan Chaloupka
534a30a058 e2e: deleteRC: replace loop with wait.PollImmediate 2021-04-14 09:03:26 +02:00
Jan Chaloupka
bb55741320 Update vendor 2021-04-14 09:03:21 +02:00
Jan Chaloupka
079bd6157b e2e: TestLowNodeUtilization: normalize nodes before running the strategy 2021-04-14 09:03:15 +02:00
Pravar Agrawal
92cb1b23ed add helm-test configurations 2021-04-14 10:29:31 +05:30
Kubernetes Prow Robot
832facc526 Merge pull request #537 from KohlsTechnology/docs-0.21.0
Update Docs and Manifests for v0.21.0
2021-04-13 08:24:52 -07:00
Kubernetes Prow Robot
c4fa6c472f Merge pull request #548 from lx1036/feature/fix-testcase
Updating policy api version used in pod evictor
2021-04-13 08:12:50 -07:00
Xiang Liu
a848dac3cf Updating policy api version used in pod evictor 2021-04-13 11:02:01 +08:00
Kubernetes Prow Robot
43a2ccf9c4 Merge pull request #546 from ingvagabund/add-diagram
Add diagram of strategies
2021-04-12 13:38:15 -07:00
Jan Chaloupka
60cf3aeb95 Add diagram of strategies 2021-04-12 09:51:54 +02:00
Kubernetes Prow Robot
84b174e841 Merge pull request #547 from KohlsTechnology/bump-1.21.0
Bump To k8s 1.21.0
2021-04-08 23:00:48 -07:00
Sean Malloy
40337d064d Bump To k8s 1.21.0 2021-04-08 23:36:06 -05:00
Kubernetes Prow Robot
9fe585c854 Merge pull request #545 from pravarag/add-verify-script-deep-copies
Add verify script for deep-copies generator
2021-04-08 21:20:35 -07:00
Pravar Agrawal
4fce2ca2f1 add verify script for deep-copies gen 2021-04-08 22:51:24 +05:30
Kubernetes Prow Robot
4c11de0403 Merge pull request #507 from pravarag/add-verify-scripts
Add verify scripts for make gen to run during PR
2021-04-07 21:41:41 -07:00
Pravar Agrawal
a9099efc45 add verify scripts for conversions gen
Signed-off-by: Pravar Agrawal <pravaag1@in.ibm.com>
2021-04-08 09:56:32 +05:30
Kubernetes Prow Robot
6edb644f2e Merge pull request #544 from ingvagabund/lnu-improve-node-usage-logging
LNU: improve nodeUsage logging
2021-04-06 23:45:53 -07:00
Jan Chaloupka
c239e1199f LNU: improve nodeUsage logging
To avoid:
```
I0210 11:56:04.137956 3309277 lownodeutilization.go:389] "Updated node usage" updatedUsage={node:0xc000460000 usage:map[cpu:0xc00042b480 memory:0xc00042b4c0 pods:0xc00042b500] allPods:[0xc0004a0000 0xc0004a03e8 0xc0004a07d0 0xc0004a0bb8 0xc0004a0fa0 0xc0004a1388 0xc0004a1770 0xc0004a1b58] lowResourceThreshold:map[cpu:0xc00042b540 memory:0xc00042b580 pods:0xc00042b5c0] highResourceThreshold:map[cpu:0xc00042b600 memory:0xc00042b640 pods:0xc00042b680]}
I0210 11:56:04.138829 3309277 lownodeutilization.go:389] "Updated node usage" updatedUsage={node:0xc000460000 usage:map[cpu:0xc00042b480 memory:0xc00042b4c0 pods:0xc00042b500] allPods:[0xc0004a0000 0xc0004a03e8 0xc0004a07d0 0xc0004a0bb8 0xc0004a0fa0 0xc0004a1388 0xc0004a1770 0xc0004a1b58] lowResourceThreshold:map[cpu:0xc00042b540 memory:0xc00042b580 pods:0xc00042b5c0] highResourceThreshold:map[cpu:0xc00042b600 memory:0xc00042b640 pods:0xc00042b680]}
I0210 11:56:04.139044 3309277 lownodeutilization.go:389] "Updated node usage" updatedUsage={node:0xc000460000 usage:map[cpu:0xc00042b480 memory:0xc00042b4c0 pods:0xc00042b500] allPods:[0xc0004a0000 0xc0004a03e8 0xc0004a07d0 0xc0004a0bb8 0xc0004a0fa0 0xc0004a1388 0xc0004a1770 0xc0004a1b58] lowResourceThreshold:map[cpu:0xc00042b540 memory:0xc00042b580 pods:0xc00042b5c0] highResourceThreshold:map[cpu:0xc00042b600 memory:0xc00042b640 pods:0xc00042b680]}
```
2021-04-06 09:43:23 +02:00
Kubernetes Prow Robot
b713b7852a Merge pull request #434 from ZongqiangZhang/extend-resources
Support extended resources in LowNodeUtilization
2021-04-06 00:21:34 -07:00
Kubernetes Prow Robot
5d07d0c8e2 Merge pull request #543 from gaurav1086/e2e_test_fix_goroutine_leak
e2e_test: fix goroutine leak
2021-04-05 18:11:19 -07:00
Gaurav Singh
7076ba0760 e2e_test: fix goroutine leak 2021-04-04 21:19:56 -04:00
ZongqiangZhang
81b816d4a4 support extended resources in lownodeutilization 2021-04-02 21:37:51 +08:00
Kubernetes Prow Robot
9ebc909c7f Merge pull request #541 from KohlsTechnology/make-gen
Update Generated Code
2021-04-01 00:39:22 -07:00
Sean Malloy
af01b675b0 Update Generated Code
Ran "make gen" using Go 1.16.1. Some changes were merged, but "make gen"
was not run. This fixes the problem.

See below PR for reference:
https://github.com/kubernetes-sigs/descheduler/pull/523
2021-04-01 00:28:20 -05:00
Kubernetes Prow Robot
ce6ce5a058 Merge pull request #539 from damemi/1.21-rc.0
Bump to k8s 1.21-rc.0
2021-03-31 16:55:21 -07:00
Mike Dame
bd4f6d4fcd Bump to k8s 1.21-rc.0 2021-03-31 10:22:56 -04:00
Sean Malloy
6a4181158a Update Docs and Manifests for v0.21.0
* Added v0.21 references to README
* Update k8s manifests with v0.21.0 references
* Added table with list of supported architectures by release
2021-03-31 00:53:19 -05:00
Kubernetes Prow Robot
a2746d09e8 Merge pull request #523 from RyanDevlin/evict-critical
Added EvictSystemCriticalPods flag to descheduler
2021-03-30 10:41:57 -07:00
RyanDevlin
b5d7219391 Completed evictSystemCriticalPods feature 2021-03-29 23:13:05 -04:00
Kubernetes Prow Robot
b09d5d99dc Merge pull request #534 from KohlsTechnology/kustomize-docs
Use Tags In Kustomize Documentation
2021-03-27 23:46:44 -07:00
Sean Malloy
dbcc20f37f Use Tags In Kustomize Documentation
The master branch always represents the next release of the
descheduler. Therefore applying the descheduler k8s manifests
from the master branch is not considered stable. It is best for
users to install descheduler using the released tags.
2021-03-27 01:21:08 -05:00
Kubernetes Prow Robot
51340b56b8 Merge pull request #533 from ingvagabund/bump-to-go1.16
Bump go to 1.16
2021-03-26 23:20:43 -07:00
Jan Chaloupka
160669817e Bump go to 1.16 2021-03-25 10:13:40 +01:00
Kubernetes Prow Robot
6ca4479892 Merge pull request #520 from KohlsTechnology/statefulset-docs
Document That Descheduler Considers StatefulSets For Eviction
2021-03-23 08:15:36 -07:00
Sean Malloy
92740a25d4 Add Initial Unit Tests For StatefulSets 2021-03-23 09:24:08 -05:00
Sean Malloy
56e4daccaf Document That Descheduler Considers StatefulSets For Eviction
Similar to ReplicaSet, ReplicationController, and Jobs pods with a
StatefulSet metadata.ownerReference are considered for eviction.
Document this, so that it is clear to end users.
2021-03-23 09:24:08 -05:00
Kubernetes Prow Robot
b546832b66 Merge pull request #526 from KohlsTechnology/chart-docs-fix
Correct Helm Chart Docs For Container Requests and Limits
2021-03-15 08:59:05 -07:00
Kubernetes Prow Robot
39e5b34af3 Merge pull request #525 from damemi/topology-spread-selector-fix
(TopologySpread) Evict pods with selectors that match multiple nodes
2021-03-14 01:35:04 -08:00
Sean Malloy
e699e08d13 Correct Helm Chart Docs For Container Requests and Limits
The resources.cpuRequest and resources.memoryRequest variables are
not valid in the helm chart values.yaml file. The correct variable
name for setting the requests and limits is resources.

Also, fixed white space alignment in the markdown table.
2021-03-12 23:17:02 -06:00
Mike Dame
af26b57e5e (TopologySpread) Evict pods with selectors that match multiple nodes 2021-03-12 13:41:17 -05:00
Kubernetes Prow Robot
22fe589ae6 Merge pull request #508 from KohlsTechnology/emeritus-approvers
Move Inactive Maintainers to Emeritus Status
2021-03-12 09:06:18 -08:00
Kubernetes Prow Robot
0a11b5a138 Merge pull request #521 from KohlsTechnology/armv7
Enable ARM32v7 Container Image Builds
2021-03-09 17:07:13 -08:00
Sean Malloy
363f02710b Enable ARM32v7 Container Image Builds
Add ARM32v7 in addition to the currently supported architectures. This
will allow running descheduler on Raspberry Pi v3 devices.
2021-03-09 09:03:58 -06:00
Kubernetes Prow Robot
6abfa232e7 Merge pull request #518 from damemi/sigs-k8s-mdtoc
Add table of contents generator/verify script
2021-03-08 12:06:59 -08:00
Mike Dame
bbfb12a120 Run hack/update-toc.sh 2021-03-08 14:39:08 -05:00
Mike Dame
5df2a0c516 Add hack scripts and makefile targets 2021-03-08 14:39:08 -05:00
Mike Dame
8ecd14289a Add <toc> markers to README 2021-03-08 10:51:55 -05:00
Mike Dame
131ed42a4c Add sigs.k8s.io/mdtoc dependency 2021-03-08 10:46:27 -05:00
Kubernetes Prow Robot
6b8d4cd5a7 Merge pull request #517 from lixiang233/fix_topology_log
Correct log in topology spread strategy
2021-03-07 11:31:42 -08:00
Kubernetes Prow Robot
24a06511a2 Merge pull request #505 from ingvagabund/collect-metrics
Collect metrics
2021-03-06 21:41:42 -08:00
lixiang
09c7d1be0a Correct log in topology spread strategy 2021-03-06 16:21:46 +08:00
Kubernetes Prow Robot
f5666882de Merge pull request #515 from damemi/clarify-pod-percentage-readme
Clarify resource percentage calculation in README
2021-03-05 07:36:22 -08:00
Jan Chaloupka
701f22404b Serve secure metrics at 10258 2021-03-05 16:35:45 +01:00
Mike Dame
d5fa60bdd5 Clarify resource percentage calculation in README 2021-03-05 09:34:04 -05:00
Kubernetes Prow Robot
9e14f733b7 Merge pull request #510 from lixiang233/Ft_filter_by_label
Filter pods by labelSelector
2021-03-05 01:14:22 -08:00
lixiang
29ade13ce7 README and e2e-testcase add for labelSelector 2021-03-04 21:32:14 +08:00
lixiang
03518badb8 Strategies: Add labelSelector to all strategies except LowNodeUtilization, RemoveDuplicates and RemovePodsViolatingTopologySpreadConstraint. 2021-03-04 21:30:50 +08:00
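For illustration, a minimal policy sketch using the new labelSelector parameter with one of the supported strategies; the selector is assumed to follow the standard Kubernetes label selector shape, and the example labels are placeholders:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
    params:
      # assumed to use the standard matchLabels/matchExpressions shape
      labelSelector:
        matchLabels:
          environment: "staging"
```
Only pods matching the selector would then be considered for eviction by that strategy.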
Jan Chaloupka
24458fb0ca Increase pods_evicted metric 2021-03-03 16:15:21 +01:00
Jan Chaloupka
1c5b32763b Register metrics
New metrics:
- build_info: Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch
- pods_evicted: Number of successfully evicted pods, by the result, by the strategy, by the namespace
2021-03-03 16:15:09 +01:00
Jan Chaloupka
3bd031bbb3 Move build's versioning bits under pkg/version 2021-03-03 15:56:53 +01:00
Jan Chaloupka
ea6e9f22b9 Vendor metrics 2021-03-03 15:56:48 +01:00
Kubernetes Prow Robot
73309a3948 Merge pull request #490 from damemi/logging-format-default
Add default logging-format value
2021-03-03 06:49:20 -08:00
Kubernetes Prow Robot
105313a0e3 Merge pull request #512 from ingvagabund/bump-gogo-protobuf-to-1.3.2
Bump github.com/gogo/protobuf to v1.3.2
2021-03-02 07:05:19 -08:00
Jan Chaloupka
d368cbed32 Bump github.com/gogo/protobuf to v1.3.2
Fixes https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3121
2021-03-02 14:48:32 +01:00
Sean Malloy
5f4dfbc922 Update Maintainer Field in Dockerfiles
Set the maintainer field to use the more generic SIG scheduling name and
email address.
2021-03-01 23:19:42 -06:00
Sean Malloy
967911e4c1 Move Inactive Maintainers to Emeritus Status
For roughly the past year damemi has been the only active approver for
the descheduler. Therefore move the inactive approvers to emeritus
status. This will help clarify to contributors who should be assigned to
pull requests.
2021-03-01 09:23:49 -06:00
Kubernetes Prow Robot
726091712d Merge pull request #509 from KohlsTechnology/update-reviwers
Add lixiang233 As A Reviewer
2021-03-01 06:45:25 -08:00
lixiang
854afa7c73 PodsListing: Add WithLabelSelector option. 2021-03-01 11:13:44 +08:00
lixiang
2517268b1f API: Add a new parameter labelSelector to StrategyParameters. 2021-02-26 15:49:00 +08:00
Sean Malloy
fea8beabab Add lixiang233 As A Reviewer 2021-02-25 22:03:50 -06:00
Kubernetes Prow Robot
7f9c95fa16 Merge pull request #506 from KohlsTechnology/kind-0.10.0
Update e2e tests to use kind v0.10.0
2021-02-25 07:29:27 -08:00
Sean Malloy
6f7e2a271e Update e2e tests to use kind v0.10.0 2021-02-25 00:17:06 -06:00
Kubernetes Prow Robot
217ebdfa73 Merge pull request #504 from lixiang233/Fix_lowNodeUtilization_log
Log and README optimization for LowNodeUtilization
2021-02-24 05:22:50 -08:00
lixiang
e014fda58e Log and README optimization for LowNodeUtilization 2021-02-24 09:17:07 +08:00
Kubernetes Prow Robot
6d0360fd16 Merge pull request #502 from damemi/node-affinity-totalevicted
Move 'total pods evicted' log message to main loop
2021-02-19 13:45:41 -08:00
Mike Dame
01a87b6143 Move 'total pods evicted' log message to main loop 2021-02-19 10:37:43 -05:00
Kubernetes Prow Robot
f3e871492c Merge pull request #499 from KohlsTechnology/go-1.15.8
Bump To Go 1.15.8
2021-02-18 05:52:57 -08:00
Kubernetes Prow Robot
bf8d744686 Merge pull request #498 from kristinnardal2/patch-1
Fix indentation in values.yaml
2021-02-18 05:52:51 -08:00
Kubernetes Prow Robot
477d12968e Merge pull request #496 from KohlsTechnology/nonroot-helm-chart
Update Helm Chart to run as non-root
2021-02-18 00:34:51 -08:00
Sean Malloy
dcb4136a96 Bump To Go 1.15.8
The main k/k repo was updated to Go 1.15.8 for the upcoming k8s v1.21.0
release. See below PR for reference.

https://github.com/kubernetes/release/issues/1895
2021-02-18 00:00:31 -06:00
Kristinn Björgvin Árdal
2c0afafccf Fix indentation in values.yaml
A couple of lines had extra whitespace.
2021-02-17 08:51:26 +01:00
Kubernetes Prow Robot
e56617253f Merge pull request #497 from ingvagabund/resourceUsagePercentages-unit-test
LowNodeUtilization: unit test resourceUsagePercentages to validate percentages are computed correctly
2021-02-11 20:26:48 -08:00
Jan Chaloupka
500aaea4dd LowNodeUtilization: unit test resourceUsagePercentages to validate percentages are computed correctly 2021-02-11 16:04:37 +01:00
Sean Malloy
4444811f26 Update Helm Chart to run as non-root 2021-02-10 21:49:01 -06:00
Kubernetes Prow Robot
b63c6fac27 Merge pull request #495 from KohlsTechnology/nonroot
Update Job and CronJob YAML to run as non-root
2021-02-10 08:22:59 -08:00
Sean Malloy
dfc76906d4 Update Job and CronJob YAML to run as non-root 2021-02-09 23:10:18 -06:00
Kubernetes Prow Robot
fbd17d4caf Merge pull request #480 from damemi/nodeselector-cronjob
Add nodeSelector to cronJob helm chart
2021-02-09 10:32:59 -08:00
Mike Dame
9c01589fb9 Add nodeSelector to cronJob helm chart 2021-02-09 13:27:26 -05:00
Kubernetes Prow Robot
a0942afaa1 Merge pull request #493 from fancc/resource_usage_percentage
Modify resourceUsage.Value() to resourceUsage.MilliValue()
2021-02-02 03:38:28 -08:00
范成城
16fa21a4a6 change resourceUsagePercentage func 2021-01-31 20:46:46 +08:00
Kubernetes Prow Robot
241f1325c9 Merge pull request #481 from damemi/ignore-pvc-pods
Add option to ignore pods with PVCs from eviction
2021-01-26 22:41:40 -08:00
Mike Dame
c1a63a557a Add option to ignore pods with PVCs from eviction 2021-01-26 08:47:54 -05:00
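A hedged sketch of how this option is expected to surface at the policy level; the top-level field name ignorePvcPods is an assumption, not confirmed by this commit message:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
# assumed field name; when true, pods mounting a PersistentVolumeClaim are not evicted
ignorePvcPods: true
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
```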
Mike Dame
e45e21368a Add default logging-format value 2021-01-25 14:19:58 -05:00
Kubernetes Prow Robot
f24b367479 Merge pull request #474 from lixiang233/Ft_include_soft_constraints
Add a parameter to include soft topology spread constraints
2021-01-21 09:21:06 -08:00
lixiang
8ba9cb1df7 Add a parameter to include soft topology spread constraints 2021-01-21 11:33:16 +08:00
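For illustration, a policy sketch enabling the new parameter; the field name includeSoftConstraints is assumed from the feature description:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingTopologySpreadConstraint":
    enabled: true
    params:
      # when true, soft (ScheduleAnyway) constraints are considered in addition to hard ones
      includeSoftConstraints: true
```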
Kubernetes Prow Robot
d502f05910 Merge pull request #484 from damemi/topology-spread-bug
Fix TopologySpread bug that evicts non-evictable pods
2021-01-12 17:26:36 -08:00
Mike Dame
241f47d947 Fix TopologySpread bug that evicts non-evictable pods 2021-01-12 15:25:55 -05:00
Kubernetes Prow Robot
19424f4119 Merge pull request #482 from damemi/fix-logging-gen
Add Logging field to v1alpha1 componentconfig
2021-01-11 09:44:25 -08:00
Mike Dame
635a40f305 Add Logging field to v1alpha1 componentconfig 2021-01-11 11:21:32 -05:00
Kubernetes Prow Robot
1804d2e3a2 Merge pull request #478 from ryuichi1208/master
Update README.md
2021-01-10 11:43:05 -08:00
Ryuichi Watanabe
fea4870243 Update README.md 2021-01-01 18:45:00 +09:00
Kubernetes Prow Robot
f54df67d11 Merge pull request #471 from Mathew857/master
refactor: remove unused code
2020-12-15 17:20:20 -08:00
wu.chaozong
1a998037f8 refactor: remove unused param in e2e_test.go 2020-12-16 08:46:40 +08:00
wu.chaozong
c481877c03 refactor: update node_test file 2020-12-15 21:15:33 +08:00
Kubernetes Prow Robot
67df39690b Merge pull request #468 from eatwithforks/jylee/user
Run as user 1000 so pod can be runAsNonRoot for security purposes
2020-12-14 06:43:28 -08:00
wu.chaozong
674f14da78 refactor: remove unused code 2020-12-13 00:09:07 +08:00
Jye Lee
0cfbdf642b Run as user 1000 so pod can be runAsNonRoot for security purposes 2020-12-10 09:37:49 -08:00
Kubernetes Prow Robot
c86d1c7eb2 Merge pull request #463 from ingvagabund/duplicates-uniformly
RemoveDuplicatePods: evict uniformly
2020-12-10 07:14:13 -08:00
Jan Chaloupka
f67c265533 RemoveDuplicatePods: evict uniformly 2020-12-10 15:33:04 +01:00
Kubernetes Prow Robot
969921640c Merge pull request #465 from damemi/skip-balanced-topology
Skip topology calculations if domains are already balanced
2020-12-09 19:48:53 -08:00
Mike Dame
0273fd7597 Skip topology calculations if domains are already balanced 2020-12-09 11:29:59 -05:00
Kubernetes Prow Robot
e84d0c5587 Merge pull request #464 from damemi/1.20-bump
Bump to v0.20.0
2020-12-09 07:12:50 -08:00
Kubernetes Prow Robot
5267ec407c Merge pull request #462 from KohlsTechnology/docs-v0.20.0
Update Docs and Manifests for v0.20.0
2020-12-09 07:04:50 -08:00
Mike Dame
6714d8e0b7 Bump to v0.20.0 2020-12-09 09:53:03 -05:00
Sean Malloy
b3439eab41 Update Docs and Manifests for v0.20.0
* Added v0.20 references to README
* Update k8s manifests with v0.20.0 references
2020-12-09 00:10:50 -06:00
Kubernetes Prow Robot
509118587a Merge pull request #453 from dieterdemeyer/master
Allow setting options successfulJobsHistoryLimit and failedJobsHistoryLimit for cronjob
2020-12-08 14:33:33 -08:00
Kubernetes Prow Robot
f482537dff Merge pull request #455 from AmoVanB/fix/cluster-role-tsc
Helm: allow the topologySpreadConstraint strategy to list namespaces
2020-12-08 11:17:35 -08:00
Amaury Van Bemten
0f95817746 add permission to list namespaces for the topologyspread constraint strategy 2020-12-08 17:11:34 +01:00
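For context, the added ClusterRole rule would be of roughly this shape; the verbs shown are an assumption, and the authoritative rule lives in the chart templates:
```yaml
rules:
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "list"]
```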
Kubernetes Prow Robot
70d1fadae7 Merge pull request #454 from damemi/topology-spread-nsfix
Fix broken namespace logic in TopologySpreadConstraint
2020-12-04 09:34:00 -08:00
Mike Dame
499beb2fd7 Fix broken namespace logic in TopologySpreadConstraint 2020-12-04 10:49:58 -05:00
Kubernetes Prow Robot
de24f3854b Merge pull request #449 from KohlsTechnology/arm-image
Enable Multi-Arch Container Image Builds
2020-12-03 17:55:25 -08:00
Kubernetes Prow Robot
a5e8ba1a70 Merge pull request #452 from KohlsTechnology/go-1.15.5
Bump To Go 1.15.5
2020-12-03 06:21:00 -08:00
Dieter De Meyer
45ad48042f Allow setting options successfulJobsHistoryLimit and failedJobsHistoryLimit for cronjob 2020-12-03 11:56:44 +01:00
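A minimal sketch of the corresponding Helm values; the keys mirror the option names in the commit message, and the numbers are illustrative only:
```yaml
# values.yaml (illustrative)
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 1
```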
Sean Malloy
550de966c7 Bump To Go 1.15.5
The main k/k repo was updated to Go 1.15.5 for the upcoming k8s v1.20.0
release. See below PR for reference.

https://github.com/kubernetes/kubernetes/pull/95776
2020-12-03 01:16:42 -06:00
Kubernetes Prow Robot
c94342db31 Merge pull request #451 from damemi/v0.20.0-rc.0
bump k8s dependencies to 1.20-rc.0
2020-12-02 23:13:00 -08:00
Sean Malloy
94f1c7dd8d Document Multi-Arch Container Image Usage 2020-12-03 00:44:19 -06:00
Sean Malloy
bf91e6790e Enable Multi-Arch Container Image Builds
Previous to this change official descheduler container images only
supported the AMD64 hardware architecture. This change enables
building official descheduler container images for multiple
architectures.

The initially supported architectures are AMD64 and ARM64.
2020-12-03 00:06:22 -06:00
Mike Dame
251f44e568 bump(*): k8s to 1.20-rc.0 2020-12-02 14:40:50 -05:00
Kubernetes Prow Robot
922c4f6a63 Merge pull request #448 from damemi/topology-spread-logging
Add more topology spread logs
2020-12-01 15:10:50 -08:00
Mike Dame
2b5ec01381 Add more topology spread logs 2020-12-01 17:02:46 -05:00
Kubernetes Prow Robot
7bcd562ff5 Merge pull request #444 from KohlsTechnology/issue-432-cronjob
Update Helm Chart To Allow Setting startingDeadlineSeconds
2020-11-24 12:10:40 -08:00
Sean Malloy
03852d0914 Update Helm Chart To Allow Setting startingDeadlineSeconds
Allow end users to optionally set the descheduler CronJob
.spec.startingDeadlineSeconds when installing using helm.
2020-11-24 10:58:16 -06:00
Kubernetes Prow Robot
5f1e9a97c4 Merge pull request #446 from ingvagabund/low-node-util-percentages-times-100
LowNodeUtilization: express usagePercentage multiplied by 100
2020-11-24 08:13:02 -08:00
Jan Chaloupka
cd6f2cd4cb LowNodeUtilization: express usagePercentage multiplied by 100
Entry params are in interval <0; 100>. Have logs respect that as well.
2020-11-24 14:48:48 +01:00
Kubernetes Prow Robot
e679c7fabc Merge pull request #442 from KohlsTechnology/issue-432
Set Container Resources In YAML Manifests
2020-11-23 08:49:34 -08:00
Kubernetes Prow Robot
6f5918d765 Merge pull request #443 from KohlsTechnology/issue-432-helm
Set Container Resources In Helm Chart
2020-11-23 07:47:33 -08:00
Sean Malloy
5a46ba0630 Set Container Resources In Helm Chart
Prior to this commit the helm chart used to install the descheduler
CronJob did not set container requests or limits. This is considered
an anti-pattern when deploying applications on k8s.

Set descheduler container resources to make it a burstable pod. This
will ensure a high quality experience for end users when deploying
descheduler into their clusters using the instructions from the README.

The default values chosen for CPU/Memory are not based on any real data.
2020-11-23 09:12:54 -06:00
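As a rough illustration of burstable container resources in values.yaml; the numbers are placeholders, consistent with the note above that the defaults were not based on real data:
```yaml
# values.yaml (illustrative placeholders)
resources:
  requests:
    cpu: 500m
    memory: 256Mi
  # leaving limits unset (or higher than requests) keeps the pod in the Burstable QoS class
```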
Kubernetes Prow Robot
c1323719f4 Merge pull request #427 from KohlsTechnology/fix-version-output
Fix Version Output For Automated Container Builds
2020-11-21 03:19:33 -08:00
Sean Malloy
8795fe6b90 Fix Version Output For Automated Container Builds
Prior to this change the output from the command "descheduler version"
when run using the official container images from k8s.gcr.io would
always output an empty string. See below for an example.

```
docker run k8s.gcr.io/descheduler/descheduler:v0.19.0 /bin/descheduler version
Descheduler version {Major: Minor: GitCommit: GitVersion: BuildDate:2020-09-01T16:43:23+0000 GoVersion:go1.15 Compiler:gc Platform:linux/amd64}
```

This change makes it possible to pass the descheduler version
information to the automated container image build process and
also makes it work for local builds too.
2020-11-20 23:25:22 -06:00
Sean Malloy
a3f8bb0369 Set Container Resources In YAML Manifests
Prior to this commit the YAML manifests used to install the descheduler
Job and CronJob did not set container requests or limits. This is
considered an anti-pattern when deploying applications on k8s.

Set descheduler container resources to make it a burstable pod. This
will ensure a high quality experience for end users when deploying
descheduler into their clusters using the instructions from the README.

The values chosen for CPU/Memory are not based on any real data.
2020-11-20 12:55:10 -06:00
Kubernetes Prow Robot
cd3c3bf4da Merge pull request #441 from ingvagabund/low-node-util-dump-node-usage-in-percentage
LowNodeUtilization: show node usage in percentage as well
2020-11-18 08:44:06 -08:00
Jan Chaloupka
652ee87bf5 LowNodeUtilization: show node usage in percentage as well 2020-11-18 14:50:53 +01:00
Kubernetes Prow Robot
5225ec4597 Merge pull request #436 from stevehipwell/helm-chart-rename
Rename Helm chart
2020-11-17 22:42:05 -08:00
Kubernetes Prow Robot
b2af720ddb Merge pull request #438 from lixiang233/add_missing_parameter
Add missing parameter in README
2020-11-17 20:24:04 -08:00
lixiang
94f07996f7 add missing parameter in README 2020-11-17 16:10:02 +08:00
Kubernetes Prow Robot
4839d5f369 Merge pull request #413 from damemi/podtopologyspread
Add PodTopologySpread strategy
2020-11-13 10:41:05 -08:00
Steve Hipwell
022e07c278 Rename helm chart 2020-11-09 10:21:48 +00:00
Kubernetes Prow Robot
620d71abdf Merge pull request #433 from ankon/patch-1
Fix trivial typo in helm README
2020-11-03 08:50:04 -08:00
Andreas Kohn
85d00ab457 Fix trivial typo in helm README 2020-11-03 13:10:37 +01:00
Kubernetes Prow Robot
b30bd40860 Merge pull request #428 from damemi/component-helper-nodeselector
Start using helpers from k8s.io/component-helpers
2020-11-02 04:52:54 -08:00
Mayank Kumar
4108362158 Add RemovePodsViolatingTopologySpreadConstraint strategy
This adds a strategy to balance pod topology domains based on the scheduler's
PodTopologySpread constraints. It attempts to find the minimum number of pods
that should be sent for eviction by comparing the largest domains in a topology
with the smallest domains in that topology.
2020-10-30 16:36:33 -04:00
Mike Dame
b27dc5f14e go mod tidy && go mod vendor 2020-10-30 09:41:49 -04:00
Mike Dame
3c54378749 Use NodeSelectorRequirementsAsSelector from k8s.io/component-helpers 2020-10-30 09:41:49 -04:00
Kubernetes Prow Robot
6240aa68f7 Merge pull request #430 from damemi/verify-spelling
Add hack/verify-spelling.sh script and Makefile target
2020-10-22 08:27:36 -07:00
Mike Dame
301af7fd9c Add hack/verify-spelling.sh script and Makefile target 2020-10-22 10:20:53 -04:00
Kubernetes Prow Robot
41d529ebe2 Merge pull request #429 from invidian/invidian/fix-typo
pkg/descheduler/strategies: fix typo
2020-10-22 06:59:36 -07:00
Mateusz Gozdek
cc6bb633ba pkg/descheduler/pod: fix typo
Signed-off-by: Mateusz Gozdek <mgozdekof@gmail.com>
2020-10-22 09:25:55 +02:00
Mateusz Gozdek
31cf70c34c pkg/descheduler/strategies: fix typo
Signed-off-by: Mateusz Gozdek <mgozdekof@gmail.com>
2020-10-22 09:25:24 +02:00
Kubernetes Prow Robot
3399619395 Merge pull request #418 from invidian/psp
charts/descheduler: add PodSecurityPolicy support
2020-10-21 23:17:35 -07:00
Kubernetes Prow Robot
cfc4cce08b Merge pull request #421 from m3y/kustomize
Support for remote resources in kustomize
2020-10-21 01:02:20 -07:00
Mateusz Gozdek
f9e9f0654a charts/descheduler: add PodSecurityPolicy support
This commit adds restrictive PodSecurityPolicy, which can be
optionally created, so descheduler can be deployed on clusters with
PodSecurityPolicy admission controller, but which do not ship default
policies.

Signed-off-by: Mateusz Gozdek <mgozdekof@gmail.com>
2020-10-21 08:41:39 +02:00
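A hedged sketch of turning on the optional PodSecurityPolicy from the chart values; the podSecurityPolicy.create key is an assumption about the chart's values layout:
```yaml
# values.yaml (assumed key)
podSecurityPolicy:
  create: true
```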
m3y
73af0e84fa Support for remote resources in kustomize 2020-10-20 03:20:11 +09:00
Kubernetes Prow Robot
b33928ac91 Merge pull request #412 from farah/logging-format
Add logging-format flag
2020-10-12 10:58:48 -07:00
Ali Farah
3ac0c408de Add --logging-format flag
Add k8s.io/component-base/config package
2020-10-12 22:27:39 +11:00
Kubernetes Prow Robot
149f900811 Merge pull request #417 from plutzilla/master
Helm chart README fix
2020-10-07 22:46:15 -07:00
Paulius Leščinskas
9ede04ba9b Update README.md
The `kube-system` namespace is now provided in the installation instructions.
2020-10-02 20:07:24 +03:00
Paulius Leščinskas
f9cbed8b71 Helm chart README fix
Helm 3 omits the `--name` flag for the release name.
2020-10-02 18:11:06 +03:00
Kubernetes Prow Robot
fa4da031e4 Merge pull request #416 from KohlsTechnology/go-1.15.2
Bump To Go 1.15.2
2020-10-02 07:09:20 -07:00
Sean Malloy
9511f308d0 Bump To Go 1.15.2 2020-10-02 01:02:36 -05:00
Kubernetes Prow Robot
52f43f0fcb Merge pull request #414 from KohlsTechnology/event-recorder-structured-logs
Update Event Logging to Use Structured Logging
2020-10-01 14:25:19 -07:00
Sean Malloy
4bb0ceeed5 Update Event Logging to Use Structured Logging 2020-10-01 00:50:37 -05:00
Kubernetes Prow Robot
279f648e9a Merge pull request #410 from lixiang233/Add_lowNodeUtilization_usecase
Add use case for lowNodeUtilization
2020-09-28 20:57:25 -07:00
Kubernetes Prow Robot
411ec740ff Merge pull request #411 from KohlsTechnology/klogv2
Convert Last Log Message To Structured Logging
2020-09-28 06:14:48 -07:00
Sean Malloy
6237ba5a43 Convert Last Log Message To Structured Logging
The k8s.io/klog/v2 package does not currently support structured logging
for warning level log messages. Therefore update the one call in the
code base using klog.Warningf to instead use klog.InfoS.
2020-09-25 22:58:28 -05:00
Kubernetes Prow Robot
5d65a9ad68 Merge pull request #409 from ingvagabund/flip-some-klog-info-to-error
Change klog info messages after a strategy is exited into error messages
2020-09-25 20:34:47 -07:00
Kubernetes Prow Robot
28f3f867c3 Merge pull request #407 from ingvagabund/structured-klog
Flip Info/Infof/Error to InfoS/ErrorS
2020-09-24 04:22:05 -07:00
lixiang
00f79aa28d add use case for lowNodeUtilization 2020-09-24 17:51:08 +08:00
lixiang
6042d717e9 delete disabled strategies in podLifeTime use case 2020-09-24 17:49:02 +08:00
Jan Chaloupka
7afa54519f Change klog info messages after a strategy is exited into error messages 2020-09-24 11:07:20 +02:00
Kubernetes Prow Robot
8c3a80fbf9 Merge pull request #406 from damemi/duplicates-namespace-filtering
Add Namespace filtering to RemoveDuplicates strategy
2020-09-22 18:16:08 -07:00
Mike Dame
11b9829885 Update README to include strategy params 2020-09-22 10:28:43 -04:00
Jan Chaloupka
4798559545 Flip Info/Infof/Error to InfoS/ErrorS 2020-09-21 09:08:11 +02:00
Mike Dame
8b34d6eb94 Add Namespace filtering to RemoveDuplicates strategy 2020-09-18 12:17:47 -04:00
Kubernetes Prow Robot
70700a1c97 Merge pull request #401 from KohlsTechnology/bump-kind
Update e2e tests to use kind v0.9.0
2020-09-15 09:58:07 -07:00
Sean Malloy
d7420eb945 Update e2e tests to use kind v0.9.0 2020-09-15 09:11:36 -05:00
Kubernetes Prow Robot
c9cfeb35c2 Merge pull request #384 from ingvagabund/refactor-low-node-utilization
Refactor low node utilization
2020-09-13 19:04:58 -07:00
Kubernetes Prow Robot
fda63a816f Merge pull request #397 from farah/farah/add-structured-logging
Convert logs to use structured logs
2020-09-13 18:46:57 -07:00
Ali Farah
6329b6c27b Convert logs to use structured logs 2020-09-12 14:46:16 +10:00
Jan Chaloupka
9b4f781c5c Be verbose about unschedulable nodes which are not considered as underutilized 2020-09-11 12:57:24 +02:00
Jan Chaloupka
63039fcfd6 Compute utilization absolutely, not relatively 2020-09-11 12:57:22 +02:00
Kubernetes Prow Robot
d25f3757d6 Merge pull request #393 from lixiang233/Ft_custom_pod_phase_PodLifeTime
PodLifeTime: allow custom podStatusPhases
2020-09-11 03:46:14 -07:00
lixiang
1303fe6eb9 PodLifeTime: allow custom podStatusPhases 2020-09-11 09:56:45 +08:00
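For illustration, a policy sketch using the new parameter; the params layout mirrors the PodLifeTime example elsewhere in this history, and the phase values listed are assumptions:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 86400
      # assumed phase values; limits eviction to pods currently in these phases
      podStatusPhases:
      - "Running"
      - "Pending"
```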
Kubernetes Prow Robot
1682cc9462 Merge pull request #394 from farah/farah/add-structured-logging
Convert logs to use structured logging
2020-09-09 10:15:08 -07:00
Kubernetes Prow Robot
605927676f Merge pull request #395 from damemi/antiaffinity-evictable
Move IsEvictable check in PodAntiAffinity
2020-09-09 07:45:52 -07:00
Jan Chaloupka
dc41e6a41c Remove createNodePodsMap 2020-09-09 16:29:11 +02:00
Ali Farah
e37c27313e Convert logs to use structured logging 2020-09-10 00:22:24 +10:00
Mike Dame
e5d9756ebe Move IsEvictable check in PodAntiAffinity
While non-evictable pods should never be evicted, they should still be
considered when calculating PodAntiAffinity violations. For example, you
may have an evictable pod that should not be running next to a system-critical
static pod. We currently filter IsEvictable before checking for Affinity violations,
so this case would not be caught.
2020-09-09 09:46:50 -04:00
Kubernetes Prow Robot
e6f1c6f78a Merge pull request #392 from KohlsTechnology/helm-hub
Add Link To Helm Hub
2020-09-03 06:59:41 -07:00
Sean Malloy
fceebded6d Add Link To Helm Hub
Updated the README with the link to the official descheduler helm chart
on https://hub.helm.sh. This makes it easier for end users to install the
desceduler using helm.
2020-09-02 23:55:27 -05:00
Kubernetes Prow Robot
08b2dffa42 Merge pull request #376 from farah/farah/add-structured-logging
Change klog to use structured logging
2020-09-02 09:49:06 -07:00
Kubernetes Prow Robot
745e29959c Merge pull request #388 from damemi/helm-chart-1.19
Update Helm chart to v0.19.0
2020-09-01 09:41:53 -07:00
Kubernetes Prow Robot
aa1bab2c4a Merge pull request #389 from KohlsTechnology/docs-0.19
Update Docs and Manifests for v0.19.0
2020-08-31 12:59:50 -07:00
Sean Malloy
19c3e02b44 Update Docs and Manifests for v0.19.0
* Added v0.19 references to README
* Update k8s manifests with v0.19.0 references
2020-08-31 14:41:53 -05:00
Mike Dame
a45057200f Update Helm chart to v0.19.0 2020-08-31 15:39:26 -04:00
Kubernetes Prow Robot
74d6be3943 Merge pull request #371 from KohlsTechnology/go-1.15
Update To Go 1.15.0
2020-08-31 12:22:43 -07:00
Sean Malloy
1fb3445692 Fix golangci-lint Failures For 1.30.0 Upgrade 2020-08-31 14:03:43 -05:00
Sean Malloy
195082d33b Update To Go 1.15.0
As part of the k8s 1.19 release cycle the Go version is being bumped to
1.15.0. Updating the descheduler to use Go 1.15 prior to the descheduler
v0.19.0 release.

See below issues for reference:
* https://github.com/kubernetes/release/issues/1421
* https://github.com/kubernetes/kubernetes/issues/93484
2020-08-31 14:03:43 -05:00
Kubernetes Prow Robot
03dbc93961 Merge pull request #385 from ingvagabund/low-node-utilization-use-clientset-in-testing
LowNodeUtilization: use clientset in testing, drop all custom reactors
2020-08-31 11:48:44 -07:00
Jan Chaloupka
d27f64480b LowNodeUtilization: use clientset in testing, drop all custom reactors 2020-08-31 20:38:01 +02:00
Kubernetes Prow Robot
5645663b71 Merge pull request #387 from KohlsTechnology/contrib-docs
Add KUBECONFIG Export To Contributing Docs
2020-08-31 06:34:28 -07:00
Kubernetes Prow Robot
dbc8092282 Merge pull request #386 from KohlsTechnology/bump-1.19
Bump k8s Modules For k8s 1.19
2020-08-31 06:34:20 -07:00
Ali Farah
50d2b246d9 Change klog to use structured logging
Signed-off-by: Ali Farah <aliyfarah9@gmail.com>
2020-08-31 14:30:08 +10:00
Sean Malloy
6220aca03e Add KUBECONFIG Export To Contributing Docs 2020-08-28 00:24:29 -05:00
Sean Malloy
674993d23a Update Vendor Deps For k8s 1.19 2020-08-27 23:49:48 -05:00
Sean Malloy
f4c3f9b18f Bump k8s Modules For k8s 1.19 2020-08-27 23:48:58 -05:00
Kubernetes Prow Robot
d65a7c4783 Merge pull request #380 from ingvagabund/move-some-flags-under-descheduler-polic
Deprecate node-selector, max-pods-to-evict-per-node and evict-local-storage-pods flags and promote them to policy v1alpha1 fields
2020-08-21 05:15:39 -07:00
Jan Chaloupka
89541f7545 Deprecate node-selector, max-pods-to-evict-per-node and evict-local-storage-pods flags and promote them to policy v1alpha1 fields 2020-08-21 13:27:40 +02:00
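A hedged sketch of the promoted settings as top-level policy fields; the field names are assumed to mirror the deprecated flags, and the selector value is illustrative:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
# assumed field names mirroring the deprecated CLI flags
nodeSelector: "node-type=infra"
evictLocalStoragePods: true
maxNoOfPodsToEvictPerNode: 10
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
```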
Kubernetes Prow Robot
17f769c1c1 Merge pull request #382 from damemi/readme-toc
Add table of contents to README
2020-08-20 12:36:51 -07:00
Mike Dame
eb9e62f047 Add table of contents to README 2020-08-20 15:24:41 -04:00
Kubernetes Prow Robot
6ccd80f2ee Merge pull request #372 from ingvagabund/isevictable
Redefine IsEvictable to be customizable for a particular strategy
2020-08-19 02:53:11 -07:00
Jan Chaloupka
d8251b9086 Redefine IsEvictable to be customizable for a particular strategy
Use WithXXX methods to extend the list of constraints to also
provide a reasonable error message.
2020-08-19 11:21:26 +02:00
Kubernetes Prow Robot
4cd1f45d90 Merge pull request #375 from KohlsTechnology/helm-chart-update
Update Maintainer Details In Helm Chart
2020-08-17 06:42:19 -07:00
Sean Malloy
2dc3f53a13 Update Maintainer Details In Helm Chart
Changing the maintainer for the helm chart to SIG scheduling instead of
an individual person.
2020-08-13 22:15:18 -05:00
Kubernetes Prow Robot
f5d8a02f79 Merge pull request #374 from ingvagabund/namespace-as-pointer
Promote Namespaces field to a pointer
2020-08-13 19:04:21 -07:00
Jan Chaloupka
a7c51ffae0 Promote Namespaces field to a pointer 2020-08-13 18:38:46 +02:00
Kubernetes Prow Robot
9746fd300f Merge pull request #364 from lixiang233/ft_allow_custom_priority_threshold
Allow custom priority threshold
2020-08-12 07:35:45 -07:00
lixiang
b5e17f91cd ClusterRole: add permission to access priorityclasses.scheduling.k8s.io 2020-08-12 13:55:53 +08:00
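For context, the added ClusterRole rule would look roughly like the following; the verbs are an assumption:
```yaml
rules:
  - apiGroups: ["scheduling.k8s.io"]
    resources: ["priorityclasses"]
    verbs: ["get", "watch", "list"]
```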
lixiang
f5524153ba README: add description for priority threshold 2020-08-12 13:55:53 +08:00
lixiang
6d693d06fb e2e: add testcase for priority threshold 2020-08-12 13:55:53 +08:00
lixiang
4d7a6ee9be e2e: Add priority and priority class to runPodLifetimeStrategy and RcByNameContainer 2020-08-12 13:55:53 +08:00
lixiang
0fdaac6042 Strategy: Set threshold priority from strategy's parameters 2020-08-12 13:54:57 +08:00
Kubernetes Prow Robot
bb7ab369d7 Merge pull request #370 from damemi/1.19-rc.4
Bump k8s dependencies to 1.19-rc.4
2020-08-11 06:42:16 -07:00
Mike Dame
d96cca2221 go mod tidy && go mod vendor 2020-08-11 09:32:00 -04:00
Mike Dame
6ee87d9d7c Bump(k8s.io): to 1.19-rc.4 2020-08-11 09:31:43 -04:00
lixiang
95ce2a4ff7 PodEvictor: add a new param thresholdPriority to IsEvictable 2020-08-11 09:57:26 +08:00
Kubernetes Prow Robot
19e1387bf1 Merge pull request #369 from damemi/duplicates-panic
Add check for ownerref length in DuplicatePods strategy
2020-08-10 07:06:20 -07:00
Mike Dame
ec4c5bed5d Add check for ownerref length in DuplicatePods strategy
This fixes a panic that occurs if a pod has no ownerrefs
later on in the strategy. If a pod has no ownerrefs, there's
nothing for us to do with it.
2020-08-10 09:37:05 -04:00
lixiang
ae38aa63af StrategyParameters: Add new param ThresholdPriority and ThresholdPriorityClassName 2020-08-07 13:53:29 +08:00
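A hedged policy sketch of the new parameters; the lower-camel-case serialized names are assumed from the Go field names, and only one of the two should normally be set:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 86400
      # assumed serialized name; pods at or above this priority are not evicted
      thresholdPriority: 10000
      # alternatively: thresholdPriorityClassName: "system-cluster-critical"
```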
Kubernetes Prow Robot
cdcd677aa0 Merge pull request #366 from lixiang233/podAntiAffinity_add_missing_validation
Add missing validation in PodAntiAffinity
2020-07-30 07:34:31 -07:00
lixiang
a5eb9fc36d Add missing validation in PodAntiAffinity 2020-07-30 16:44:03 +08:00
Kubernetes Prow Robot
96efd2312b Merge pull request #363 from KohlsTechnology/new-registry-name
Update Container Registry to k8s.gcr.io
2020-07-29 08:29:48 -07:00
Sean Malloy
e7699c4f6b Update Container Registry to k8s.gcr.io
The k8s project recently cut over to the new official k8s.gcr.io
container registry. The descheduler image can now be pulled from
k8s.gcr.io. This is just a new DNS name for the container registry. The
previously documented DNS names for the registry still work, but
require more typing.
2020-07-28 22:45:06 -05:00
Kubernetes Prow Robot
d0fbebb77c Merge pull request #337 from damemi/rebase-1.19
Rebase k8s dependencies to 1.19-rc.2
2020-07-28 10:47:47 -07:00
Kubernetes Prow Robot
46bb5b6f55 Merge pull request #362 from damemi/only-release-tags
Update version parsing to exclude helm-chart tags
2020-07-28 09:31:48 -07:00
Kubernetes Prow Robot
b799ed074a Merge pull request #361 from jjmengze/patch-1
remove unnecessary line feed in log messages
2020-07-28 09:19:48 -07:00
Kubernetes Prow Robot
eee41ee111 Merge pull request #338 from ingvagabund/filter-out-pods-by-namespaces
Filter pods by namespaces
2020-07-28 08:57:47 -07:00
MengZeLee
6ac81e0b9c remove unnecessary line feed in log messages
If we remove these \n, there will be no misspell errors in the Go report
2020-07-28 23:27:23 +08:00
Mike Dame
5970899029 Update version parsing to exclude helm-chart tags 2020-07-28 10:59:27 -04:00
Mike Dame
5bb0389538 Update API scheme registration for 1.19 2020-07-28 10:24:21 -04:00
Mike Dame
92cb6a378a Change gnostic/openapiv2 to lowercase in git 2020-07-27 13:55:31 -04:00
Mike Dame
b09932e92a go mod tidy && go mod vendor 2020-07-27 13:55:29 -04:00
Mike Dame
63603f38d6 Pin k8s deps to 1.19-rc.2 2020-07-27 13:55:13 -04:00
Jan Chaloupka
42db31683f README: describe usage of the namespace filtering 2020-07-27 10:57:30 +02:00
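For illustration, a hedged sketch of the include/exclude namespace filtering described in the README change; the field layout is assumed:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 86400
      namespaces:
        include:
        - "dev"
        # or use `exclude:` with a list of namespaces to skip (not both at once)
```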
Jan Chaloupka
c40a9c397f e2e: add test for included/excluded namespace through PodLifeTime strategy 2020-07-27 10:54:35 +02:00
Kubernetes Prow Robot
4014ebad92 Merge pull request #359 from KohlsTechnology/more-helm-docs
More Helm Documentation
2020-07-26 18:56:16 -07:00
Kubernetes Prow Robot
bb7cb05571 Merge pull request #360 from dharmab/descheduler-autoheal-guide
Add NPD+CA autohealing use case to user guide
2020-07-24 15:20:16 -07:00
Dharma Bellamkonda
30b2bd5d9f Add NPD+CA autohealing use case to user guide 2020-07-24 15:40:47 -06:00
Sean Malloy
8d5ab05aa0 Add Helm Badge To README 2020-07-23 22:14:22 -05:00
Sean Malloy
db501da34d Clean Up End User Helm Documentation
* Correct helm install command in documentation
* Add link to helm install docs from main README
2020-07-23 22:11:17 -05:00
Kubernetes Prow Robot
8d60370612 Merge pull request #358 from damemi/helm-chart-name-docs
Update helm chart name in docs
2020-07-23 16:38:22 -07:00
Mike Dame
052f011288 Update helm chart name in docs 2020-07-23 15:42:40 -04:00
Kubernetes Prow Robot
6e23579bd0 Merge pull request #356 from damemi/update-helm-action
Update Helm release action to work on release branches
2020-07-23 11:05:29 -07:00
Mike Dame
7331f4e5de Update Helm release action to work on release branches
Basing this action on push to `chart-*` tags doesn't work: the action itself
creates the new release tag, so trying to push the changes to a tag ends up
with the releaser comparing the changes to themself (and failing).

This also renames the chart from "descheduler" to "descheduler-helm-chart", to
avoid confusion with automated releases.
2020-07-22 17:12:54 -04:00
Jan Chaloupka
11f1333af7 ListPodsOnANode: allow to include/exclude namespaces
Info: field selector is still not properly mocked so it's not possible to unit test it
2020-07-21 15:08:05 +02:00
Jan Chaloupka
74f70fdbc9 ListPodsOnANode: define Options type to pass various options
Options like:
- filter
- included/excluded namespaces
- labels
2020-07-21 15:07:45 +02:00
Jan Chaloupka
0006fb039d ListPodsOnANode: have one function parameter per each line 2020-07-21 15:02:34 +02:00
Kubernetes Prow Robot
0894f7740c Merge pull request #343 from ingvagabund/test-cleanup
Clean e2e test so it's easier to extend it
2020-07-21 05:21:13 -07:00
Kubernetes Prow Robot
c3f07dc366 Merge pull request #351 from KohlsTechnology/helm-release-docs
Update Release Documentation For Helm Charts
2020-07-20 20:33:14 -07:00
Sean Malloy
ca8f1051eb Update Helm Chart Version To 0.18.0 2020-07-20 12:52:38 -05:00
Sean Malloy
c7692a2e9f Update Release Documentation For Helm Charts
The descheduler now has an official helm chart. This change documents the
process to release a new helm chart artifact.
2020-07-20 12:51:50 -05:00
Jan Chaloupka
53badf7b61 e2e: create dedicated ns for each e2e test 2020-07-17 18:15:53 +02:00
Jan Chaloupka
f801c5f72f e2e: code cleanup 2020-07-17 18:15:13 +02:00
Jan Chaloupka
327880ba51 e2e: separate tests for LowNodeUtilization strategy and evict annotation 2020-07-17 18:11:01 +02:00
Jan Chaloupka
7bb8b4feda e2e: create and delete RC in the same function body
So create/delete are coupled and caller of create is responsible for calling delete as well
2020-07-17 18:06:19 +02:00
Kubernetes Prow Robot
36b1e1f061 Merge pull request #298 from stevehipwell/helm-chart
Add Helm chart
2020-07-16 16:15:00 -07:00
Steve Hipwell
4507a90bb6 Add helm chart 2020-07-16 17:36:17 +01:00
Kubernetes Prow Robot
550f68306c Merge pull request #342 from KohlsTechnology/remove-travis-ci-config
Remove Travis CI Configuration
2020-07-16 07:05:41 -07:00
Sean Malloy
2b668566ce Remove Travis CI Configuration
The e2e tests are now being run through Prow. Therefore the Travis CI
configuration can now be completely removed.
2020-07-15 23:52:27 -05:00
Kubernetes Prow Robot
ee414ea366 Merge pull request #344 from ingvagabund/kind-have-version-configurable-through-env
e2e: have k8s version configurable when creating cluster through kind
2020-07-14 06:47:21 -07:00
Jan Chaloupka
5761b5d595 e2e: have k8s version configurable when creating cluster through kind 2020-07-14 15:25:02 +02:00
Kubernetes Prow Robot
07f476dfc4 Merge pull request #340 from damemi/update-e2e
Update e2e script to use Kind setup
2020-07-13 19:57:20 -07:00
Mike Dame
f5e9f07321 Move kind setup to e2e script
This moves the kind setup (previously used by Travis) to the e2e runner script
to accommodate the switch to Prow. This provides a KIND_E2E env var to specify
whether to run the tests in kind, or (by default) to run locally.
2020-07-13 15:49:41 -04:00
Kubernetes Prow Robot
05c69ee26a Merge pull request #336 from lixiang233/avoid_duplicated_append_RemoveDuplicatePods
avoid appending list multiple times in RemoveDuplicates
2020-07-09 01:32:02 -07:00
Kubernetes Prow Robot
1623e09122 Merge pull request #332 from lixiang233/fix_test_struct_lownodeutilization
Add maxPodsToEvictPerNode to LowNodeUtilization testcase struct
2020-07-09 01:22:02 -07:00
lixiang
696aa7c505 rename maxPodsToEvict to maxPodsToEvictPerNode to avoid misunderstanding 2020-07-08 15:15:48 +08:00
lixiang
c53dce0805 Add maxPodsToEvictPerNode to testcase struct 2020-07-08 15:15:38 +08:00
lixiang
cd8b5a0354 avoid appending list multiple times in RemoveDuplicates 2020-07-07 14:42:53 +08:00
Kubernetes Prow Robot
b51f24eb8e Merge pull request #333 from lixiang233/refactor_lowNodeUtilization_func
Support only one sorting strategy in lowNodeUtilization
2020-07-02 22:48:47 -07:00
lixiang
ae3b4368ee support only one sorting strategy in lowNodeUtilization 2020-07-01 09:47:35 +08:00
Kubernetes Prow Robot
61eef93618 Merge pull request #330 from paulfantom/patch-1
examples: fix typo
2020-06-29 07:27:24 -07:00
Paweł Krupa
fa0a2ec6fe examples: fix typo
Fix incorrect example causing the following error at runtime:

PodsHavingTooManyRestarts thresholds not set
2020-06-28 14:28:41 +02:00
Kubernetes Prow Robot
7ece10a643 Merge pull request #328 from KohlsTechnology/go114
Update To Go 1.14.4
2020-06-25 08:26:39 -07:00
Sean Malloy
71c8eae47e Update To Go 1.14.4
The kubernetes project has been updated to use Go 1.14.4. See below pull
request.

https://github.com/kubernetes/kubernetes/pull/88638

After making the updates to Go 1.14 "make gen" no longer worked. The
file hack/tools.go had to be created to get "make gen" working with Go
1.14.
2020-06-23 21:33:28 -05:00
Kubernetes Prow Robot
267b0837dc Merge pull request #327 from farah/farah/klog-migration
Update klog to v2
2020-06-23 08:35:40 -07:00
Kubernetes Prow Robot
c713537d56 Merge pull request #322 from lixiang233/move_pod_sort
Move sortPodsBasedOnPriority to pod util
2020-06-22 12:21:40 -07:00
Kubernetes Prow Robot
e374229707 Merge pull request #325 from KohlsTechnology/issue-template
Add initial GitHub issue templates
2020-06-22 07:42:40 -07:00
Sean Malloy
f834581a8e Fix Spelling Error 2020-06-22 09:12:22 -05:00
Ali Farah
15fcde5229 Update klog to v2 2020-06-22 20:43:22 +10:00
Sean Malloy
96c5dd3941 Add initial GitHub issue templates
Initially users can open a bug report, feature request, or misc request.
Bug reports and feature requests will have the "kind" label
automatically added.
2020-06-20 00:52:23 -05:00
lixiang
65a03e76bf Add sorting to RemovePodsViolatingInterPodAntiAffinity 2020-06-19 14:11:23 +08:00
lixiang
43525f6493 Move sortPodsBasedOnPriority to podutil 2020-06-16 19:19:03 +08:00
lixiang
7457626f62 Move helper funcs to testutil 2020-06-16 19:17:28 +08:00
Kubernetes Prow Robot
08c22e8921 Merge pull request #321 from KohlsTechnology/add-event-reason
Add Pod Eviction Reason To Events
2020-06-15 08:53:57 -07:00
Sean Malloy
4ff533ec17 Add pod eviction reason to k8s events
Prior to this change the event created for every pod eviction was
identical. Instead leverage the newly added eviction reason when
creating k8s events. This makes it easier for end users to understand
why the descheduler evicted a pod when inspecting k8s events.
2020-06-10 01:13:17 -05:00
Sean Malloy
7680e3d079 Use var declaration instead of short assignmnet
This is a very minor refactor to use a var declaration for the reason
variable. A var declaration is being used because the zero value for
strings is an empty string.

https://golang.org/ref/spec#The_zero_value
2020-06-10 01:05:05 -05:00
Kubernetes Prow Robot
305801dd0e Merge pull request #319 from damemi/remove-extra-eviction-log
Remove redundant eviction log message and add "Reason" param to EvictPod
2020-06-09 11:25:19 -07:00
Mike Dame
9951b85d60 Add optional reason parameter to EvictPod 2020-06-09 13:35:51 -04:00
Mike Dame
ff21ec9432 Remove redundant eviction log message 2020-06-09 12:17:45 -04:00
Kubernetes Prow Robot
5e15d77bf2 Merge pull request #309 from damemi/change-evict-local-storage-pods
Make evictLocalStoragePods a property of PodEvictor
2020-06-08 03:53:46 -07:00
Kubernetes Prow Robot
d833c73fc4 Merge pull request #317 from ingvagabund/extend-owners
Add myself to project reviewers
2020-06-05 10:45:44 -07:00
Kubernetes Prow Robot
795a80dfb0 Merge pull request #310 from lixiang233/ignore_no_evictable_pod_node
Skip evicting when no evictable pod found on node
2020-06-05 07:41:44 -07:00
Mike Dame
f5e4acdd8a Make evictLocalStoragePods a property of PodEvictor
This is a minor refactor of PodEvictor to include evictLocalStoragePods as a
property (so that it doesn't need to be passed to each strategy function.) It
also removes ListEvictablePodsOnNode and extends ListPodsOnANode with an optional
"filter" function parameter, so that for example IsEvictable can be passed to it
and achieve the same results as the function formerly known as ListEvictablePodsOnNode.
This approach also now allows strategies to more explicitly extend their criteria of what
IsEvictable considers an evictable pod (such as NodeAffinity, which checks that the pod
can fit on any other node).
2020-06-05 10:28:48 -04:00
Jan Chaloupka
eb4c1bb355 Add myself to project reviewers
Based on https://github.com/kubernetes/community/blob/master/community-membership.md#requirements-1:

The following apply to the part of codebase for which one would be a reviewer in an OWNERS file (for repos using the bot).

> member for at least 3 months

For a couple of years now

> Primary reviewer for at least 5 PRs to the codebase

https://github.com/kubernetes-sigs/descheduler/pull/285
https://github.com/kubernetes-sigs/descheduler/pull/275
https://github.com/kubernetes-sigs/descheduler/pull/267
https://github.com/kubernetes-sigs/descheduler/pull/254
https://github.com/kubernetes-sigs/descheduler/pull/181

> Reviewed or merged at least 20 substantial PRs to the codebase

https://github.com/kubernetes-sigs/descheduler/pulls?q=is%3Apr+is%3Aclosed+assignee%3Aingvagabund

> Knowledgeable about the codebase

yes

> Sponsored by a subproject approver
> With no objections from other approvers
> Done through PR to update the OWNERS file

this PR

> May either self-nominate, be nominated by an approver in this subproject, or be nominated by a robot

self-nominating
2020-06-04 18:16:38 +02:00
Kubernetes Prow Robot
6dfa95cc87 Merge pull request #312 from lixiang233/fix_lowUtilization_golint
Pass golint check for pkg/descheduler/strategies/
2020-06-04 07:09:15 -07:00
lixiang
eb9d974a8b pass golint check for pkg/descheduler/strategies/ 2020-06-04 14:33:24 +08:00
lixiang
d41a1f4a56 Ignore evicting when no evictable pod found on node 2020-06-04 10:31:15 +08:00
Kubernetes Prow Robot
138ad556a3 Merge pull request #315 from damemi/update-travis-yml
Remove redundant tests from Travis CI
2020-06-03 11:44:11 -07:00
Mike Dame
37124e6e45 Remove redundant tests from Travis CI
These tests are now covered by k8s test-infra prow tests.
2020-06-03 14:11:08 -04:00
Kubernetes Prow Robot
bd412bf87f Merge pull request #300 from damemi/refactor-is-evictable
Add more verbose logging to IsEvictable checks
2020-05-29 10:03:16 -07:00
Kubernetes Prow Robot
6f9b31f568 Merge pull request #308 from damemi/fix-lint-for-ci
Download golangci-lint to _output/bin directory
2020-05-29 09:57:15 -07:00
Mike Dame
c858740c4f Run golangci-lint from GOPATH 2020-05-29 12:48:42 -04:00
Kubernetes Prow Robot
bfefe634a1 Merge pull request #307 from damemi/make-verify
Add parent make verify target
2020-05-29 08:01:16 -07:00
Mike Dame
7b7b9e1cd7 Add parent make verify target 2020-05-29 10:23:38 -04:00
Mike Dame
0a4b8b0a25 Add more verbose logging to IsEvictable checks 2020-05-29 10:20:14 -04:00
Kubernetes Prow Robot
f28183dcbe Merge pull request #306 from ingvagabund/make-Params-a-pointer-II
Add missing address of api.StrategyParameters
2020-05-29 05:53:16 -07:00
Jan Chaloupka
abdf79454f Add missing address of api.StrategyParameters
https://github.com/kubernetes-sigs/descheduler/pull/285 got merged before re-running the unit tests.
2020-05-29 13:34:57 +02:00
Kubernetes Prow Robot
46b570b71d Merge pull request #296 from ingvagabund/make-Params-a-pointer
Make DeschedulerStrategy.Params a pointer
2020-05-28 22:53:15 -07:00
Kubernetes Prow Robot
616a9b5f6b Merge pull request #285 from lixiang233/Ft_change_lowUtilization_break
Change break condition and thresholds validation for lowUtilization
2020-05-28 07:36:02 -07:00
Kubernetes Prow Robot
003a4cdc2b Merge pull request #302 from lixiang233/clean_up_strategy_enable_check
Clean up code in LowNodeUtilization
2020-05-28 07:16:02 -07:00
Kubernetes Prow Robot
54ea05d8bb Merge pull request #299 from damemi/node-affinity-logs
Standardize node affinity strategy logs
2020-05-28 01:52:02 -07:00
lixiang
d0eea0cabb Clean up code in LowNodeUtilization 2020-05-28 10:35:46 +08:00
Mike Dame
f0297dfe03 Standardize node affinity strategy logs 2020-05-27 16:52:16 -04:00
Kubernetes Prow Robot
ef1f36f8e4 Merge pull request #297 from ingvagabund/add-verify-gofmt
Add verify-gofmt make target
2020-05-27 07:02:40 -07:00
Jan Chaloupka
a733c95dcc Add verify-gofmt and verify-vendor make target
So e.g. CI can run 'make verify-gofmt' instead of ./hack/verify-gofmt in case the script location changes.
2020-05-27 15:08:20 +02:00
Jan Chaloupka
5a81a0661b Make DeschedulerStrategy.Params a pointer
To avoid empty params fields when serializing:
  RemoveDuplicates:
    enabled: true
    params: {}
  RemovePodsHavingTooManyRestarts:
    enabled: true
    params:
      podsHavingTooManyRestarts: {}
  RemovePodsViolatingInterPodAntiAffinity:
    enabled: true
    params: {}
  RemovePodsViolatingNodeAffinity:
    enabled: true
    params:
      nodeAffinityType:
      - requiredDuringSchedulingIgnoredDuringExecution
  RemovePodsViolatingNodeTaints:
    enabled: true
    params: {}
2020-05-27 10:31:31 +02:00
Kubernetes Prow Robot
83e04960af Merge pull request #288 from KohlsTechnology/update-support-matrix
Update Compatibility Matrix in README
2020-05-26 06:37:12 -07:00
lixiang
2a8dc69cbb Stop condition and config validation change for lowUtilization
1.Set default CPU/Mem/Pods percentage of thresholds to 100
2.Stop evicting pods if any resource ran out
3.Add thresholds verification method and limit resource percentage within [0, 100]
4.Change testcases and readme
2020-05-25 19:03:37 +08:00
Kubernetes Prow Robot
5d82d08af3 Merge pull request #291 from KohlsTechnology/update-releasedocs
Add URL For Viewing Staging Registry
2020-05-22 09:55:11 -07:00
Sean Malloy
c265825166 Add URL For Viewing Staging Registry
This newly documented URL can be used to view the descheduler staging
registry in a web browser. This is easier to browse if the gcloud
command is not available.
2020-05-22 11:33:45 -05:00
Kubernetes Prow Robot
ed0126fb63 Merge pull request #290 from KohlsTechnology/bump-install-manifests
Update Install Manifests For Release v0.18.0
2020-05-22 07:08:38 -07:00
Sean Malloy
8c7267b379 Update Install Manifests For Release v0.18.0 2020-05-21 22:54:54 -05:00
Kubernetes Prow Robot
d85ce22975 Merge pull request #289 from KohlsTechnology/update-release-docs
Update Release Guide
2020-05-20 11:46:19 -07:00
Kubernetes Prow Robot
a9091a1e37 Merge pull request #287 from ingvagabund/evict-pod-mention-dry-run-if-set
Mention pod eviction is in dry mode when set
2020-05-20 11:44:19 -07:00
Sean Malloy
d3b0ac8e06 Update Release Guide
Starting with descheduler v0.18 a release branch will be created for
each descheduler minor release. The release guide has been updated with
the steps to create release branches.
2020-05-20 11:10:42 -05:00
Sean Malloy
435674fb44 Update Compatibility Matrix in README
The matrix has been updated with details for the soon-to-be-released v0.18.
Also, clarified the descheduler and k8s version compatibility
requirements and recommendations.
2020-05-20 10:55:57 -05:00
Jan Chaloupka
eacbae72fd Mention pod eviction is in dry mode when set
Indicate whether a pod is only virtually evicted so there's no confusion
with the real eviction.
2020-05-20 10:40:58 +02:00
Kubernetes Prow Robot
34550d4b7c Merge pull request #275 from damemi/image-in-duplicates
Consider pod image in duplicates strategy
2020-05-15 06:51:38 -07:00
Mike Dame
c6ff87dbd6 Consider pod image in duplicates strategy
This increases the specificity of the RemoveDuplicates strategy by
removing pods which not only have the same owner but also the same
list of container images. This also adds a
parameter, `ExcludeOwnerKinds` to the RemoveDuplicates strategy
which accepts a list of Kinds. If a pod has any of these Kinds as
an owner, that pod is not considered for eviction.
2020-05-15 09:37:09 -04:00
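A hedged sketch of the new parameter in a policy; the exact nesting under params has varied across releases, so treat the layout as an assumption:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: true
    params:
      removeDuplicates:
        # pods owned by any of these kinds are never considered duplicates for eviction
        excludeOwnerKinds:
        - "ReplicaSet"
```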
Kubernetes Prow Robot
04efe65f90 Merge pull request #279 from KohlsTechnology/eviction-logs
Add Additional Details To Pod Eviction Log Messages
2020-05-14 07:20:22 -07:00
Sean Malloy
55afde6251 Remove line break in log message from PodLifeTime strategy 2020-05-14 00:36:52 -05:00
Sean Malloy
7039b6c8aa Refactor function EvictPod to only return an error
The EvictPod function previously returned a bool and an error. The
function now only returns an error. Callers can check for failure by
testing if the returned error is not nil. This aligns the EvictPod
function with idiomatic Go best practices.

Also, the function name has been changed from EvictPod to evictPod
because it is not used outside the evictions package.
2020-05-14 00:11:54 -05:00
Sean Malloy
cff984261e Log an error when EvictPod method returns a non-nil error
End users should be able to see the detailed error from the EvictPod
method when it fails. Updates all strategies to log the error. The
PodLifeTime strategy already logs this error.
2020-05-13 23:36:32 -05:00
Sean Malloy
4819ab9c69 Add namespace to pod eviction log messages
In multi-tenant environments it is useful to know which namespace a pod
was evicted from. Therefore log the namespace when evicting pods.

Also, do not log a nil error when successfully evicting pods.
2020-05-13 23:28:25 -05:00
Kubernetes Prow Robot
25336da708 Merge pull request #281 from KohlsTechnology/update-travis-ci
Update Travis CI build matrix with latest k8s point releases
2020-05-13 08:20:26 -07:00
Sean Malloy
4941f6a16b Update Travis CI build matrix with latest k8s point releases 2020-05-12 22:38:23 -05:00
Kubernetes Prow Robot
d7e93058d4 Merge pull request #280 from damemi/k8s-1.18
Update to k8s 1.18.2 dependencies
2020-05-12 13:40:21 -07:00
Mike Dame
c20a595370 Update travis.yml with new k8s version and updated kind version 2020-05-12 15:01:25 -04:00
Mike Dame
eec1104d6e React to 1.18 by adding contexts to client calls 2020-05-12 15:01:25 -04:00
Mike Dame
741b35edf5 Update to k8s 1.18.2 dependencies 2020-05-12 14:07:31 -04:00
Kubernetes Prow Robot
c01cfcf3b6 Merge pull request #274 from KohlsTechnology/pod-lifetime-strategy
Add New PodLifeTime Strategy
2020-05-08 07:29:42 -07:00
Sean Malloy
643cd472ef Add initial production use cases section to user guide 2020-05-08 09:02:24 -05:00
Sean Malloy
668d727fc2 Add VS Code File To .gitignore 2020-05-07 23:10:48 -05:00
Sean Malloy
423ee35846 Add New PodLifeTime Strategy
The new PodLifeTime descheduler strategy can be used to evict pods that
were created more than the configured number of seconds ago.

In the below example pods created more than 24 hours ago will be evicted.
````
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
     enabled: true
     params:
        maxPodLifeTimeSeconds: 86400
````
2020-05-07 23:10:36 -05:00
Kubernetes Prow Robot
31c7855212 Merge pull request #278 from lixiang233/fix_readme
Fix readme
2020-05-07 12:19:43 -07:00
Kubernetes Prow Robot
211f3942b6 Merge pull request #276 from damemi/toomanyrestarts-pointer
Switch PodsHavingTooManyRestarts params to pointer
2020-05-07 12:17:42 -07:00
Kubernetes Prow Robot
beae282735 Merge pull request #277 from damemi/damemi-approvers
Add damemi to approvers
2020-05-07 12:15:42 -07:00
lixiang
635348efb9 Fix readme 2020-05-07 11:47:32 +08:00
Mike Dame
fa335c782f Switch PodsHavingTooManyRestarts params to pointer 2020-05-06 13:29:05 -04:00
Mike Dame
b019a58525 Add damemi to approvers 2020-05-06 11:00:58 -04:00
Kubernetes Prow Robot
78eef6c343 Merge pull request #267 from ingvagabund/drop-DeschedulerServer-from-strategies
Drop descheduler server from strategies
2020-04-28 08:34:06 -07:00
Jan Chaloupka
cbcefb5d2f Remove options.DeschedulerServer from all strategies 2020-04-28 16:13:33 +02:00
Kubernetes Prow Robot
149085fb57 Merge pull request #240 from ingvagabund/turn-strategy-params-NodeResourceUtilizationThresholds-field-into-pointer
Turn StrategyParameters.NodeResourceUtilizationThresholds field into a pointer
2020-04-28 06:48:07 -07:00
Jan Chaloupka
991eddb691 Turn StrategyParameters.NodeResourceUtilizationThresholds field into a pointer
The field is intended to be omitempty when not set. Without a pointer, the serialized strategy
configuration looks like:

```yaml
strategies:
  LowNodeUtilization:
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        numberOfNodes: 1
        targetThresholds:
          cpu: 50
          memory: 50
          pods: 20
        thresholds:
          cpu: 50
          memory: 50
          pods: 20
  RemoveDuplicates:
    enabled: true
    params:
      nodeResourceUtilizationThresholds: {}
  RemovePodsViolatingInterPodAntiAffinity:
    enabled: true
    params:
      nodeResourceUtilizationThresholds: {}
  RemovePodsViolatingNodeAffinity:
    enabled: true
    params:
      nodeAffinityType:
      - requiredDuringSchedulingIgnoredDuringExecution
      nodeResourceUtilizationThresholds: {}
  RemovePodsViolatingNodeTaints:
    enabled: true
    params:
      nodeResourceUtilizationThresholds: {}
```

It's preferred to have the following instead:
```yaml
strategies:
  LowNodeUtilization:
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        numberOfNodes: 1
        targetThresholds:
          cpu: 50
          memory: 50
          pods: 20
        thresholds:
          cpu: 50
          memory: 50
          pods: 20
  RemoveDuplicates:
    enabled: true
  RemovePodsViolatingInterPodAntiAffinity:
    enabled: true
  RemovePodsViolatingNodeAffinity:
    enabled: true
    params:
      nodeAffinityType:
      - requiredDuringSchedulingIgnoredDuringExecution
  RemovePodsViolatingNodeTaints:
    enabled: true
```
2020-04-28 15:20:30 +02:00
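A small, self-contained Go sketch (simplified types, not the real API definitions) of why the pointer matters here: encoding/json only honors `omitempty` for a struct-typed field when the field can be nil.

```go
// Demonstrates omitempty behavior with a pointer-to-struct field.
package main

import (
	"encoding/json"
	"fmt"
)

type Thresholds struct {
	NumberOfNodes int `json:"numberOfNodes,omitempty"`
}

type StrategyParameters struct {
	// A plain struct field is never considered "empty", so "{}" would always
	// be serialized. A pointer field is dropped entirely when it is nil.
	NodeResourceUtilizationThresholds *Thresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
}

func main() {
	out, _ := json.Marshal(StrategyParameters{}) // field is nil, so it is omitted
	fmt.Println(string(out))                     // prints: {}
}
```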
Kubernetes Prow Robot
91de471376 Merge pull request #254 from damemi/toomanyrestarts
Add RemoveTooManyRestarts policy
2020-04-27 12:48:05 -07:00
Mike Dame
c2d7e22749 make gen && go mod vendor 2020-04-24 10:48:28 -04:00
Mike Dame
e7c42794a0 Add RemovePodsHavingTooManyRestarts strategy 2020-04-24 10:48:28 -04:00
Kubernetes Prow Robot
3a8dfc07ed Merge pull request #266 from ingvagabund/pod-evictor
Move maximum-pods-per-nodes-evicted logic under a single invocation
2020-04-23 16:14:07 -07:00
Jan Chaloupka
077b7f6505 Drop check for MaxNoOfPodsToEvictPerNode and invoke EvictPod wrapper instead 2020-04-22 10:33:22 +02:00
Jan Chaloupka
240fa93bc5 Move maximum-pods-per-nodes-evicted logic under a single invocation
Each strategy implements a check for whether the maximum number of pods per node
has already been evicted. The check duplicates code that can be put
under a single invocation. This also reduces the number of arguments passed
to each strategy, since the EvictPod call can encapsulate both the check
and the invocation of the pod eviction itself.
2020-04-22 10:32:16 +02:00
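Roughly, the idea looks like the following sketch (hypothetical names and fields, not the descheduler's actual pod evictor API): the evictor owns the per-node counter, so strategies call a single method and no longer carry the limit themselves.

```go
// Sketch of an evictor that encapsulates the max-pods-per-node check.
package evictions

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

type podEvictor struct {
	maxPodsPerNode   int
	evictionsPerNode map[string]int
}

// evictPod performs the limit check and the eviction in one call; a strategy
// only learns whether the pod was evicted.
func (e *podEvictor) evictPod(pod *v1.Pod, nodeName string) (bool, error) {
	if e.maxPodsPerNode > 0 && e.evictionsPerNode[nodeName] >= e.maxPodsPerNode {
		return false, nil // per-node limit reached
	}
	// ... issue the eviction API request here ...
	e.evictionsPerNode[nodeName]++
	fmt.Printf("evicted pod %s/%s from node %s\n", pod.Namespace, pod.Name, nodeName)
	return true, nil
}
```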
Kubernetes Prow Robot
6c7f846917 Merge pull request #265 from ingvagabund/tolerations-tains-code-cleanup
Tolerations taints code cleanup
2020-04-21 14:27:52 -07:00
Jan Chaloupka
6db7c3b92c Call utils.TolerationsTolerateTaintsWithFilter directly, not through checkPodsSatisfyTolerations 2020-04-21 22:34:14 +02:00
Kubernetes Prow Robot
030267107a Merge pull request #262 from ingvagabund/order-pods-by-priority-only-over-over-utilized-nodes
Order pods by priority only over over utilized nodes
2020-04-19 14:05:38 -07:00
Jan Chaloupka
1c300a9881 Drop getNoScheduleTaints and allTaintsTolerated in favor of utils.TolerationsTolerateTaintsWithFilter 2020-04-19 22:18:49 +02:00
Jan Chaloupka
0e9b33b822 allTaintsTolerated: remove for iteration through tolerations which is already implemented in utils.TolerationsTolerateTaint 2020-04-19 22:07:34 +02:00
Jan Chaloupka
36e3d1e703 Drop local implementation of toleratesTaint in favor of k8s.io/api/core/v1.Toleration.TolerateTaint
Functionally identical implementation of toleratesTaint is already provided in k8s.io/api
2020-04-19 21:58:41 +02:00
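A usage sketch of the upstream helper mentioned above; `Toleration.ToleratesTaint` is the method shipped in k8s.io/api/core/v1, while the surrounding loop mirrors what a local helper would otherwise do:

```go
// Sketch: tolerance check built on the upstream Toleration.ToleratesTaint.
package utils

import v1 "k8s.io/api/core/v1"

// tolerationsTolerateTaint reports whether any of the given tolerations
// tolerates the taint.
func tolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool {
	for i := range tolerations {
		if tolerations[i].ToleratesTaint(taint) {
			return true
		}
	}
	return false
}
```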
Jan Chaloupka
f53264b613 lownodeutilization: evict best-effort pods only
Unit tests refactored for node utilization and pod classification
2020-04-17 11:32:34 +02:00
Jan Chaloupka
414554ae5e lownodeutilization: make unit tests with/without priority table driven 2020-04-17 11:32:28 +02:00
Jan Chaloupka
150f945592 lownodeutilization: classify pods of over utilized nodes only
Only over utilized nodes need classification of pods into categories.
Thus, skipping categorization of pods saves computation time in cases
where over utilized nodes make up less than 50% of all nodes.
2020-04-17 11:32:22 +02:00
Kubernetes Prow Robot
9a84afece1 Merge pull request #264 from ingvagabund/remove-pods-violating-node-affinity
readme: RemovePodsViolatingNodeAffinity: reword description of the strategy
2020-04-16 20:59:08 -07:00
Jan Chaloupka
e0c101c5ae readme: RemovePodsViolatingNodeAffinity: reword description of the strategy
Compared to https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity
the strategy effectively implements the requiredDuringSchedulingRequiredDuringExecution node affinity type
on behalf of kubelets. The only addition over the kubelet is that the strategy checks whether at least one
other node is capable of respecting the node affinity rules.

When the requiredDuringSchedulingRequiredDuringExecution node affinity type is implemented in the kubelet,
it's likely the strategy will either be removed or re-implemented. Stressing the relation with
requiredDuringSchedulingRequiredDuringExecution will help consumers of the descheduler
keep in mind that the kubelet will eventually take over the strategy once implemented.
2020-04-15 11:54:37 +02:00
Kubernetes Prow Robot
e3a562aea0 Merge pull request #260 from CriaHu/hyq_descheduler
fix broken link :
2020-04-07 18:01:44 -07:00
Kubernetes Prow Robot
4966e8ee08 Merge pull request #256 from damemi/fix-podfitsanynode
Fix early return from PodFitsAnyNode check
2020-04-07 08:01:43 -07:00
Kubernetes Prow Robot
d3542d5892 Merge pull request #258 from damemi/verify-deps
Add verify-vendor scripts to CI
2020-04-07 07:53:43 -07:00
Mike Dame
62d04b0fc7 go mod tidy && go mod vendor 2020-04-07 10:34:47 -04:00
Mike Dame
6ecbc85448 Add hack/{update,verify}-vendor.sh scripts 2020-04-07 10:34:47 -04:00
Cria Hu
b9b1eae6fb fix broken link :
https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md
2020-04-06 09:28:09 +08:00
Mike Dame
e95e42930d Remove early return false from PodFitsAnyNode 2020-04-02 11:53:29 -04:00
Kubernetes Prow Robot
3cabb69014 Merge pull request #255 from KohlsTechnology/go-1.13.9
Update to Go 1.13.9
2020-03-31 20:47:27 -07:00
Sean Malloy
ad8f90f177 Update to Go 1.13.9
Kubernetes releases 1.16 through 1.19 have been updated
to Go 1.13.9, so it's time to update the descheduler too.
2020-03-31 22:30:52 -05:00
Kubernetes Prow Robot
8a62cf1699 Merge pull request #246 from KohlsTechnology/go-1.13.8
Update to Go 1.13.8
2020-03-09 11:17:36 -07:00
Sean Malloy
a6b54dae99 Update to Go 1.13.8
Kubernetes releases 1.15 through 1.19 have been updated
to Go 1.13.8, so it's time to update the descheduler too.

https://github.com/kubernetes/release/issues/1134
2020-03-08 23:12:53 -05:00
Kubernetes Prow Robot
112684bcb9 Merge pull request #181 from jw-s/feature/respect-tolerations
Respect Node taints with tolerations (if exist)
2020-03-04 10:45:48 -08:00
Kubernetes Prow Robot
682e07c3cd Merge pull request #249 from ingvagabund/list-node-through-informer-in-every-iteration
List nodes through informer in every iteration
2020-03-04 10:43:48 -08:00
Jan Chaloupka
9593ce16d9 List nodes through informer in every iteration
Also, refactor the code a bit so it can be tested without checking for eviction support.
2020-03-04 16:26:13 +01:00
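For context, a minimal sketch (hypothetical wiring, not the actual descheduler code) of serving node lists from a shared informer's cache so that each descheduling iteration avoids a direct API call:

```go
// Sketch: build a node lister backed by a shared informer cache.
package descheduler

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// newCachedNodeLister returns a function that lists nodes from the informer's
// local cache; call it once per descheduling iteration.
func newCachedNodeLister(client kubernetes.Interface, stopCh <-chan struct{}) func() ([]*v1.Node, error) {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	lister := factory.Core().V1().Nodes().Lister()
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	return func() ([]*v1.Node, error) {
		return lister.List(labels.Everything())
	}
}
```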
Kubernetes Prow Robot
2545c8b031 Merge pull request #244 from KohlsTechnology/docs-revamp
Documentation Refresh
2020-03-04 07:23:47 -08:00
Sean Malloy
c9793e7029 Clean README based on feedback from review 2020-03-03 22:07:20 -06:00
Joel Whittaker-Smith
4757132452 check for taints on low nodes and honor them 2020-03-02 14:43:11 +01:00
Sean Malloy
561b3b67b3 Clean README based on feedback from review 2020-02-28 22:38:28 -06:00
Sean Malloy
566f33e6ad Initial User and Contributor Guide Draft 2020-02-26 01:29:57 -06:00
Sean Malloy
83a75bac80 Create Structure and Links For User and Contributor Guides 2020-02-26 01:30:08 -06:00
Sean Malloy
d8772f5685 Clean up README
* Fixed small typos
* Update compatibility matrix
2020-02-26 01:29:57 -06:00
Kubernetes Prow Robot
df510187d6 Merge pull request #233 from KohlsTechnology/update-release-docs
Add notes section to release documentation
2020-02-21 11:12:12 -08:00
Kubernetes Prow Robot
137b9b72e7 Merge pull request #242 from KohlsTechnology/fix-gcr-registry
Fix GCR Image Location In Job and CronJob YAML
2020-02-20 09:41:01 -08:00
Sean Malloy
09b4979673 Fix GCR Image Location In Job and CronJob YAML
The correct location for the official descheduler container image is
us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:TAG_NAME.
2020-02-20 00:30:16 -06:00
Kubernetes Prow Robot
eff8185d7c Merge pull request #239 from aveshagarwal/master-fix-gen
Fix email address in docker files.
2020-02-12 07:04:52 -08:00
Avesh Agarwal
83ee94dd08 Fix email address in docker files. 2020-02-12 09:31:24 -05:00
Sean Malloy
aae52ac2ee Document gcloud command used to get container image details
The "gcloud container images describe" command is used to get the SHA256
hash of an image in the staging registry. The SHA256 hash is required
for the image promotion process.

https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter
2020-02-11 22:46:27 -06:00
Sean Malloy
a7c4295c58 Add notes section to release documentation
Add a link to the test grid dashboard for automated container image builds,
and document useful gcloud commands for working with the staging registry.

This documentation is useful for project maintainers that are creating
releases and publishing container images. The chance of remembering
these commands and links is slim to none. Therefore documenting this
information will be very useful.
2020-02-11 22:41:35 -06:00
5147 changed files with 882912 additions and 196073 deletions

.github/ISSUE_TEMPLATE/bug_report.md

@@ -0,0 +1,46 @@
---
name: Bug report
about: Create a bug report to help improve descheduler
title: ''
labels: 'kind/bug'
assignees: ''
---
<!-- Please answer these questions before submitting your bug report. Thanks! -->
**What version of descheduler are you using?**
descheduler version:
**Does this issue reproduce with the latest release?**
**Which descheduler CLI options are you using?**
**Please provide a copy of your descheduler policy config file**
**What k8s version are you using (`kubectl version`)?**
<details><summary><code>kubectl version</code> Output</summary><br><pre>
$ kubectl version
</pre></details>
**What did you do?**
<!--
If possible, provide a recipe for reproducing the error.
A detailed sequence of steps describing what to do to observe the issue is good.
A complete runnable bash shell script is best.
-->
**What did you expect to see?**
**What did you see instead?**


@@ -0,0 +1,26 @@
---
name: Feature request
about: Suggest an idea for descheduler
title: ''
labels: 'kind/feature'
assignees: ''
---
<!-- Please answer these questions before submitting your feature request. Thanks! -->
**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->
**Describe alternatives you've considered**
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
**What version of descheduler are you using?**
descheduler version:
**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->

.github/ISSUE_TEMPLATE/misc_request.md

@@ -0,0 +1,18 @@
---
name: Miscellaneous
about: Not a bug and not a feature
title: ''
labels: ''
assignees: ''
---
<!--
Please do not use this to submit a bug report or feature request. Use the
bug report or feature request options instead.
Also, please consider posting in the Kubernetes Slack #sig-scheduling channel
instead of opening an issue if this is a support request.
Thanks!
-->

.github/ci/ct.yaml

@@ -0,0 +1,6 @@
chart-dirs:
- charts
helm-extra-args: "--timeout=5m"
check-version-increment: false
helm-extra-set-args: "--set=kind=Deployment"
target-branch: master

.github/workflows/helm.yaml

@@ -0,0 +1,67 @@
name: Helm
on:
push:
branches:
- master
- release-*
paths:
- 'charts/**'
- '.github/workflows/helm.yaml'
pull_request:
paths:
- 'charts/**'
- '.github/workflows/helm.yaml'
jobs:
lint-and-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v2.1
with:
version: v3.9.2
- uses: actions/setup-python@v3.1.2
with:
python-version: 3.7
- uses: actions/setup-go@v3
with:
go-version: '1.19.0'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
with:
version: v3.7.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config=.github/ci/ct.yaml)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
run: ct lint --config=.github/ci/ct.yaml --validate-maintainers=false
# Need a multi node cluster so descheduler runs until evictions
- name: Create multi node Kind cluster
run: make kind-multi-node
# helm-extra-set-args only available after ct 3.6.0
- name: Run chart-testing (install)
run: ct install --config=.github/ci/ct.yaml
- name: E2E after chart install
env:
KUBERNETES_VERSION: "v1.25.0"
KIND_E2E: true
SKIP_INSTALL: true
run: make test-e2e

.github/workflows/release.yaml

@@ -0,0 +1,31 @@
name: Release Charts
on:
push:
branches:
- release-*
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v1
with:
version: v3.4.0
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.1.0
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"

.github/workflows/security.yaml

@@ -0,0 +1,47 @@
name: "Security"
on:
push:
branches:
- main
- master
- release-*
schedule:
- cron: '30 1 * * 0'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build image
run: |
IMAGE_REPO=${HELM_IMAGE_REPO:-descheduler}
IMAGE_TAG=${HELM_IMAGE_TAG:-security-test}
VERSION=security-test make image
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
with:
image-ref: 'descheduler:security-test'
format: 'sarif'
exit-code: '0'
severity: 'CRITICAL,HIGH'
output: 'trivy-results.sarif'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v2
with:
sarif_file: 'trivy-results.sarif'
exit-code: '0'

.gitignore

@@ -1 +1,8 @@
_output/
_tmp/
vendordiff.patch
.idea/
*.code-workspace
.vscode/
kind
bin/


@@ -1,31 +0,0 @@
sudo: false
language: go
go:
- 1.13.x
env:
- K8S_VERSION=v1.17.0
- K8S_VERSION=v1.16.4
- K8S_VERSION=v1.15.7
services:
- docker
before_script:
- curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
- wget https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-linux-amd64
- chmod +x kind-linux-amd64
- mv kind-linux-amd64 kind
- export PATH=$PATH:$PWD
- kind create cluster --image kindest/node:${K8S_VERSION} --config=$TRAVIS_BUILD_DIR/hack/kind_config.yaml
- export KUBECONFIG="$(kind get kubeconfig-path)"
- docker pull kubernetes/pause
- kind load docker-image kubernetes/pause
- kind get kubeconfig > /tmp/admin.conf
script:
- mkdir -p ~/gopath/src/sigs.k8s.io/
- mv ~/gopath/src/github.com/kubernetes-sigs/descheduler ~/gopath/src/sigs.k8s.io/.
- hack/verify-gofmt.sh
- make lint
- make build
- make test-unit
- make test-e2e


@@ -10,7 +10,7 @@ We have full documentation on how to get started contributing here:
- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet/README.md) - Common resources for existing developers
## Mentorship


@@ -11,15 +11,19 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.13.6
FROM golang:1.19.0
WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
RUN make
ARG ARCH
ARG VERSION
RUN VERSION=${VERSION} make build.$ARCH
FROM scratch
MAINTAINER Avesh Agarwal <avagarwa@redhat.com>
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
USER 1000
COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler


@@ -13,7 +13,9 @@
# limitations under the License.
FROM scratch
MAINTAINER Avesh Agarwal <avagarwa@redhat.com>
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
USER 1000
COPY _output/bin/descheduler /bin/descheduler

Makefile

@@ -14,16 +14,20 @@
.PHONY: test
# VERSION is currently based on the last commit
VERSION?=$(shell git describe --tags)
COMMIT=$(shell git rev-parse HEAD)
export CONTAINER_ENGINE ?= docker
# VERSION is based on a date stamp plus the last commit
VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags)
BRANCH?=$(shell git branch --show-current)
SHA1?=$(shell git rev-parse HEAD)
BUILD=$(shell date +%FT%T%z)
LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app
LDFLAG_LOCATION=sigs.k8s.io/descheduler/pkg/version
ARCHS = amd64 arm arm64
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
GOLANGCI_VERSION := v1.15.0
HAS_GOLANGCI := $(shell which golangci-lint)
GOLANGCI_VERSION := v1.49.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
# REGISTRY is the container registry to push
# into. The default is to push to the staging
@@ -41,26 +45,74 @@ IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
# In the future binaries can be uploaded to
# GCS bucket gs://k8s-staging-descheduler.
HAS_HELM := $(shell which helm 2> /dev/null)
all: build
build:
CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
build.amd64:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
build.arm:
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
build.arm64:
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
dev-image: build
docker build -f Dockerfile.dev -t $(IMAGE) .
$(CONTAINER_ENGINE) build -f Dockerfile.dev -t $(IMAGE) .
image:
docker build -t $(IMAGE) .
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .
push-container-to-gcloud: image
image.amd64:
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .
image.arm:
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm" -t $(IMAGE)-arm .
image.arm64:
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .
push: image
gcloud auth configure-docker
docker tag $(IMAGE) $(IMAGE_GCLOUD)
docker push $(IMAGE_GCLOUD)
$(CONTAINER_ENGINE) tag $(IMAGE) $(IMAGE_GCLOUD)
$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)
push: push-container-to-gcloud
push-all: image.amd64 image.arm image.arm64
gcloud auth configure-docker
for arch in $(ARCHS); do \
$(CONTAINER_ENGINE) tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)-$${arch} ;\
done
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
for arch in $(ARCHS); do \
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
done
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest push $(IMAGE_GCLOUD) ;\
clean:
rm -rf _output
rm -rf _tmp
verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-toc verify-gen
verify-govet:
./hack/verify-govet.sh
verify-spelling:
./hack/verify-spelling.sh
verify-gofmt:
./hack/verify-gofmt.sh
verify-vendor:
./hack/verify-vendor.sh
verify-toc:
./hack/verify-toc.sh
test-unit:
./test/run-unit-tests.sh
@@ -72,10 +124,37 @@ gen:
./hack/update-generated-conversions.sh
./hack/update-generated-deep-copies.sh
./hack/update-generated-defaulters.sh
#undo go mod changes caused by above.
go mod tidy
./hack/update-toc.sh
verify-gen:
./hack/verify-conversions.sh
./hack/verify-deep-copies.sh
./hack/verify-defaulters.sh
lint:
ifndef HAS_GOLANGCI
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin ${GOLANGCI_VERSION}
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
endif
golangci-lint run
./_output/bin/golangci-lint run
# helm
ensure-helm-install:
ifndef HAS_HELM
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
endif
lint-chart: ensure-helm-install
helm lint ./charts/descheduler
build-helm:
helm package ./charts/descheduler --dependency-update --destination ./bin/chart
test-helm: ensure-helm-install
./test/run-helm-tests.sh
kind-multi-node:
kind create cluster --name kind --config ./hack/kind_config.yaml --wait 2m
ct-helm:
./hack/verify-chart.sh

OWNERS

@@ -1,10 +1,16 @@
approvers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
- damemi
- ingvagabund
- seanmalloy
- a7i
reviewers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
- damemi
- seanmalloy
- ingvagabund
- lixiang233
- a7i
- janeliul
emeritus_approvers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla

README.md

@@ -1,16 +1,14 @@
[![Build Status](https://travis-ci.org/kubernetes-sigs/descheduler.svg?branch=master)](https://travis-ci.org/kubernetes-sigs/descheduler)
[![Go Report Card](https://goreportcard.com/badge/kubernetes-sigs/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
![Release Charts](https://github.com/kubernetes-sigs/descheduler/workflows/Release%20Charts/badge.svg)
# Descheduler for Kubernetes
## Introduction
Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
pod can or can not be scheduled, are guided by its configurable policy which comprises of set of
rules, called predicates and priorities. The scheduler's decisions are influenced by its view of
a Kubernetes cluster at that point of time when a new pod appears first time for scheduling.
As Kubernetes clusters are very dynamic and their state change over time, there may be desired
a Kubernetes cluster at that point of time when a new pod appears for scheduling.
As Kubernetes clusters are very dynamic and their state changes over time, there may be desire
to move already running pods to some other nodes for various reasons:
* Some nodes are under or over utilized.
@@ -24,87 +22,179 @@ Descheduler, based on its policy, finds pods that can be moved and evicts them.
note, in current implementation, descheduler does not schedule replacement of evicted pods
but relies on the default scheduler for that.
## Build and Run
Table of Contents
=================
<!-- toc -->
- [Quick Start](#quick-start)
- [Run As A Job](#run-as-a-job)
- [Run As A CronJob](#run-as-a-cronjob)
- [Run As A Deployment](#run-as-a-deployment)
- [Install Using Helm](#install-using-helm)
- [Install Using Kustomize](#install-using-kustomize)
- [User Guide](#user-guide)
- [Policy and Strategies](#policy-and-strategies)
- [RemoveDuplicates](#removeduplicates)
- [LowNodeUtilization](#lownodeutilization)
- [HighNodeUtilization](#highnodeutilization)
- [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
- [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
- [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
- [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
- [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
- [PodLifeTime](#podlifetime)
- [RemoveFailedPods](#removefailedpods)
- [Filter Pods](#filter-pods)
- [Namespace filtering](#namespace-filtering)
- [Priority filtering](#priority-filtering)
- [Label filtering](#label-filtering)
- [Node Fit filtering](#node-fit-filtering)
- [Pod Evictions](#pod-evictions)
- [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
- [High Availability](#high-availability)
- [Configure HA Mode](#configure-ha-mode)
- [Metrics](#metrics)
- [Compatibility Matrix](#compatibility-matrix)
- [Getting Involved and Contributing](#getting-involved-and-contributing)
- [Communicating With Contributors](#communicating-with-contributors)
- [Roadmap](#roadmap)
- [Code of conduct](#code-of-conduct)
<!-- /toc -->
- Checkout the repo into your $GOPATH directory under src/sigs.k8s.io/descheduler
## Quick Start
Build descheduler:
The descheduler can be run as a `Job`, `CronJob`, or `Deployment` inside of a k8s cluster. It has the
advantage of being able to be run multiple times without needing user intervention.
The descheduler pod is run as a critical pod in the `kube-system` namespace to avoid
being evicted by itself or by the kubelet.
```sh
$ make
```
and run descheduler:
```sh
$ ./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file>
```
If you want more information about what descheduler is doing add `-v 1` to the command line
For more information about available options run:
```
$ ./_output/bin/descheduler --help
```
## Running Descheduler as a Job or CronJob
The descheduler can be run as a job or cronjob inside of a pod. It has the advantage of
being able to be run multiple times without needing user intervention.
The descheduler pod is run as a critical pod to avoid being evicted by itself,
or by the kubelet due to an eviction event. Since critical pods are created in the
`kube-system` namespace, the descheduler job and its pod will also be created
in `kube-system` namespace.
### Setup RBAC
To give necessary permissions for the descheduler to work in a pod.
### Run As A Job
```
$ kubectl create -f kubernetes/rbac.yaml
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/job/job.yaml
```
### Create a configmap to store descheduler policy
### Run As A CronJob
```
$ kubectl create -f kubernetes/configmap.yaml
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/cronjob/cronjob.yaml
```
### Create a Job or CronJob
### Run As A Deployment
As a Job.
```
$ kubectl create -f kubernetes/job.yaml
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/deployment/deployment.yaml
```
Or as a CronJob.
### Install Using Helm
Starting with release v0.18.0 there is an official helm chart that can be used to install the
descheduler. See the [helm chart README](https://github.com/kubernetes-sigs/descheduler/blob/master/charts/descheduler/README.md) for detailed instructions.
The descheduler helm chart is also listed on the [artifact hub](https://artifacthub.io/packages/helm/descheduler/descheduler).
### Install Using Kustomize
You can use kustomize to install descheduler.
See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/resource/) for detailed instructions.
Run As A Job
```
$ kubectl create -f kubernetes/cronjob.yaml
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.25.1' | kubectl apply -f -
```
Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.25.1' | kubectl apply -f -
```
Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.25.1' | kubectl apply -f -
```
## User Guide
See the [user guide](docs/user-guide.md) in the `/docs` directory.
## Policy and Strategies
Descheduler's policy is configurable and includes strategies to be enabled or disabled.
Five strategies, `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`, `RemovePodsViolatingNodeAffinity` , `RemovePodsViolatingNodeTaints` are currently implemented.
As part of the policy, the parameters associated with the strategies can be configured too.
By default, all strategies are enabled.
Descheduler's policy is configurable and includes strategies that can be enabled or disabled. By default, all strategies are enabled.
The policy includes a common configuration that applies to all the strategies:
| Name | Default Value | Description |
|------|---------------|-------------|
| `nodeSelector` | `nil` | limiting the nodes which are processed |
| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
| `maxNoOfPodsToEvictPerNamespace` | `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
| `evictFailedBarePods` | `false` | allow eviction of pods without owner references and in failed phase |
As part of the policy, the parameters associated with each strategy can be configured.
See each strategy for details on available parameters.
**Policy:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictFailedBarePods: false
evictLocalStoragePods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false
strategies:
...
```
The following diagram provides a visualization of most of the strategies to help
categorize how strategies fit together.
![Strategies diagram](strategies_diagram.png)
### RemoveDuplicates
This strategy makes sure that there is only one pod associated with a Replica Set (RS),
Replication Controller (RC), Deployment, or Job running on same node. If there are more,
This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
ReplicationController (RC), StatefulSet, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This issue could happen
if some nodes went down due to whatever reasons, and pods on them were moved to other nodes leading to
more than one pod associated with RS or RC, for example, running on same node. Once the failed nodes
are ready again, this strategy could be enabled to evict those duplicate pods. Currently, there are no
parameters associated with this strategy. To disable this strategy, the policy should look like:
more than one pod associated with a RS or RC, for example, running on the same node. Once the failed nodes
are ready again, this strategy could be enabled to evict those duplicate pods.
```
It provides one optional parameter, `excludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction. Note that
pods created by Deployments are considered for eviction by this strategy. The `excludeOwnerKinds` parameter
should include `ReplicaSet` to have pods created by Deployments excluded.
**Parameters:**
|Name|Type|
|---|---|
|`excludeOwnerKinds`|list(string)|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveDuplicates":
enabled: false
enabled: true
params:
removeDuplicates:
excludeOwnerKinds:
- "ReplicaSet"
```
### LowNodeUtilization
@@ -113,20 +203,50 @@ This strategy finds nodes that are under utilized and evicts pods, if possible,
in the hope that recreation of evicted pods will be scheduled on these underutilized nodes. The
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.
The under utilization of nodes is determined by a configurable threshold, `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, and number of pods in terms of percentage. If a node's
usage is below threshold for all (cpu, memory, and number of pods), the node is considered underutilized.
Currently, pods' request resource requirements are considered for computing node resource utilization.
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage (the percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node).
If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods' resource requests are considered for computing node resource utilization.
There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
from where pods could be evicted. Any node, between the thresholds, `thresholds` and `targetThresholds` is
from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, number of pods, or extended resources),
the node is considered over utilized. Any node between the thresholds, `thresholds` and `targetThresholds` is
considered appropriately utilized and is not considered for eviction. The threshold, `targetThresholds`,
can be configured for cpu, memory, and number of pods too in terms of percentage.
These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements.
An example of the policy for this strategy would look like:
These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements. Note that this
strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
(those with usage below `thresholds`); it will abort if the number of `underutilized nodes` or `overutilized nodes` is zero.
```
Additionally, the strategy accepts a `useDeviationThresholds` parameter.
If that parameter is set to `true`, the thresholds are considered as percentage deviations from mean resource usage.
`thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
**Parameters:**
|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`targetThresholds`|map(string:int)|
|`numberOfNodes`|int|
|`useDeviationThresholds`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
@@ -144,28 +264,145 @@ strategies:
"pods": 50
```
There is another parameter associated with `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when number of under utilized nodes
Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`.
If any of these resource types is not specified, all its thresholds default to 100% to avoid nodes going from underutilized to overutilized.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional,
and will not be used to compute node's usage if it's not specified in `thresholds` and `targetThresholds` explicitly.
* `thresholds` or `targetThresholds` can not be nil and they must configure exactly the same types of resources.
* The valid range of the resource's percentage value is \[0, 100\]
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
are above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
### HighNodeUtilization
This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
trigger down scaling of under utilized nodes.
This strategy **must** be used with the scheduler scoring strategy `MostAllocated`. The parameters of this strategy are
configured under `nodeResourceUtilizationThresholds`.
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node.
If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods' resource requests are considered for computing node resource utilization.
Any node above `thresholds` is considered appropriately utilized and is not considered for eviction.
The `thresholds` param could be tuned as per your cluster requirements. Note that this
strategy evicts pods from `underutilized nodes` (those with usage below `thresholds`)
so that they can be recreated in appropriately utilized nodes.
The strategy will abort if the number of `underutilized nodes` or `appropriately utilized nodes` is zero.
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
**Parameters:**
|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`numberOfNodes`|int|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
```
Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`. If any of these resource types is not specified, all its thresholds default to 100%.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional, and will not be used to compute node's usage if it's not specified in `thresholds` explicitly.
* `thresholds` can not be nil.
* The valid range of the resource's percentage value is \[0, 100\]
There is another parameter associated with the `HighNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
### RemovePodsViolatingInterPodAntiAffinity
This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example, if there is podA on node and podB and podC(running on same node) have antiaffinity rules which prohibit them to run on the same node, then podA will be evicted from the node so that podB and podC could run. This issue could happen, when the anti-affinity rules for pods B,C are created when they are already running on node. Currently, there are no parameters associated with this strategy. To disable this strategy, the policy should look like:
This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example,
if there is podA on a node, and podB and podC (running on the same node) have anti-affinity rules which prohibit
them from running on the same node, then podA will be evicted from the node so that podB and podC can run. This
issue could happen when the anti-affinity rules for podB and podC are created while they are already running on
the node.
```
**Parameters:**
|Name|Type|
|---|---|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingInterPodAntiAffinity":
enabled: false
enabled: true
```
### RemovePodsViolatingNodeAffinity
This strategy makes sure that pods violating node affinity are removed from nodes. For example, there is podA that was scheduled on nodeA which satisfied the node affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time of scheduling, but over time nodeA no longer satisfies the rule, then if another node nodeB is available that satisfies the node affinity rule, then podA will be evicted from nodeA. The policy file should like this -
This strategy makes sure all pods violating
[node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)
are eventually removed from nodes. Node affinity rules allow a pod to specify
the `requiredDuringSchedulingIgnoredDuringExecution` type, which tells the scheduler
to respect node affinity when scheduling the pod, but tells the kubelet to ignore it
in case the node changes over time and no longer satisfies the affinity.
When enabled, the strategy serves as a temporary implementation
of `requiredDuringSchedulingRequiredDuringExecution`, evicting pods from nodes
that no longer satisfy their node affinity.
```
For example, there is podA scheduled on nodeA which satisfies the node
affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time
of scheduling. Over time nodeA stops satisfying the rule. When the strategy gets
executed and there is another node available that satisfies the node affinity rule,
podA gets evicted from nodeA.
**Parameters:**
|Name|Type|
|---|---|
|`nodeAffinityType`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
@@ -175,63 +412,489 @@ strategies:
nodeAffinityType:
- "requiredDuringSchedulingIgnoredDuringExecution"
```
### RemovePodsViolatingNodeTaints
This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example: there is a pod "podA" with toleration to tolerate a taint ``key=value:NoSchedule`` scheduled and running on the tainted node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations and will be evicted. The policy file should look like:
This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example there is a
pod "podA" with a toleration to tolerate a taint ``key=value:NoSchedule`` scheduled and running on the tainted
node. If the node's taint is subsequently updated or removed, the taint is no longer satisfied by the pod's tolerations
and the pod will be evicted.
````
Node taints can be excluded from consideration by specifying a list of excludedTaints. If a node taint key **or**
key=value matches an excludedTaints entry, the taint will be ignored.
For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".
**Parameters:**
|Name|Type|
|---|---|
|`excludedTaints`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
````yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingNodeTaints":
enabled: true
params:
excludedTaints:
- dedicated=special-user # exclude taints with key "dedicated" and value "special-user"
- reserved # exclude all taints with key "reserved"
````
### RemovePodsViolatingTopologySpreadConstraint
This strategy makes sure that pods violating [topology spread constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.
By default, this strategy only deals with hard constraints; setting the parameter `includeSoftConstraints` to `true` will
include soft constraints.
Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.
**Parameters:**
|Name|Type|
|---|---|
|`includeSoftConstraints`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: false
```
### RemovePodsHavingTooManyRestarts
This strategy makes sure that pods having too many restarts are removed from nodes. For example, if a pod with an EBS/PD volume
can't get the volume/disk attached to the instance, the pod should be re-scheduled to other nodes. Its parameters
include `podRestartThreshold`, which is the number of restarts (summed over all eligible containers) at which a pod
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
into that calculation.
**Parameters:**
|Name|Type|
|---|---|
|`podRestartThreshold`|int|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsHavingTooManyRestarts":
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
```
### PodLifeTime
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
You can also specify the `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`
If a value for `states` or `podStatusPhases` is not specified,
pods in any state (even `Running`) are considered for eviction.
**Parameters:**
|Name|Type|Notes|
|---|---|---|
|`maxPodLifeTimeSeconds`|int||
|`podStatusPhases`|list(string)|Deprecated in v0.25+ Use `states` instead|
|`states`|list(string)|Only supported in v0.25+|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))||
|`namespaces`|(see [namespace filtering](#namespace-filtering))||
|`labelSelector`|(see [label filtering](#label-filtering))||
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
states:
- "Pending"
- "PodInitializing"
```
### RemoveFailedPods
This strategy evicts pods that are in failed status phase.
You can provide an optional parameter to filter by failed `reasons`.
`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
You can specify an optional parameter `minPodLifetimeSeconds` to evict pods that are older than the specified number of seconds.
Lastly, you can specify the optional parameter `excludeOwnerKinds` and if a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
**Parameters:**
|Name|Type|
|---|---|
|`minPodLifetimeSeconds`|uint|
|`excludeOwnerKinds`|list(string)|
|`reasons`|list(string)|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "NodeAffinity"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600
```
## Filter Pods
### Namespace filtering
The following strategies accept a `namespaces` parameter which allows specifying a list of namespaces to include or exclude:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemoveDuplicates`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
For example:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
namespaces:
include:
- "namespace1"
- "namespace2"
```
In the example above, `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
The same holds for the `exclude` field:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
namespaces:
exclude:
- "namespace1"
- "namespace2"
```
The strategy gets executed over all namespaces but `namespace1` and `namespace2`.
It's not allowed to combine the `include` and `exclude` fields.
### Priority filtering
All strategies are able to configure a priority threshold; only pods under the threshold can be evicted. You can
specify this threshold by setting the `thresholdPriorityClassName` (setting the threshold to the value of the given
priority class) or `thresholdPriority` (directly setting the threshold) parameter. By default, this threshold
is set to the value of the `system-cluster-critical` priority class.
Note: Setting `evictSystemCriticalPods` to true disables priority filtering entirely.
E.g.
Setting `thresholdPriority`
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
thresholdPriority: 10000
```
Setting `thresholdPriorityClassName`
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
thresholdPriorityClassName: "priorityclass1"
```
Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`. If the given priority class
does not exist, the descheduler won't create it and will throw an error.
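The referenced priority class must therefore already exist in the cluster. As a minimal sketch, a `priorityclass1` matching the example above could look like this (the `value` of 10000 is illustrative and becomes the effective eviction threshold):
```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: priorityclass1   # referenced by thresholdPriorityClassName above
value: 10000             # pods at or above this priority are protected from eviction
globalDefault: false
description: "Example threshold class for the descheduler priority filter."
```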
### Label filtering
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#labelselector-v1-meta)
to filter pods by their labels:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
This allows the strategies to run only against the pods the descheduler is interested in.
For example:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
labelSelector:
matchLabels:
component: redis
matchExpressions:
- {key: tier, operator: In, values: [cache]}
- {key: environment, operator: NotIn, values: [dev]}
```
### Node Fit filtering
The following strategies accept a `nodeFit` boolean parameter which can optimize descheduling:
* `RemoveDuplicates`
* `LowNodeUtilization`
* `HighNodeUtilization`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemovePodsHavingTooManyRestarts`
* `RemoveFailedPods`
If set to `true` the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently the following criteria are considered when setting `nodeFit` to `true`:
- A `nodeSelector` on the pod
- Any `tolerations` on the pod and any `taints` on the other nodes
- `nodeAffinity` on the pod
- Resource `requests` made by the pod and the resources available on other nodes
- Whether any of the other nodes are marked as `unschedulable`
E.g.
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeFit: true
nodeResourceUtilizationThresholds:
thresholds:
"cpu": 20
"memory": 20
"pods": 20
targetThresholds:
"cpu": 50
"memory": 50
"pods": 50
```
Note that node fit filtering references the current pod spec, and not that of its owner.
Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
This is expected behavior as the descheduler is a "best-effort" mechanism.
Using Deployments instead of ReplicationControllers provides an automated rollout of pod spec changes, therefore ensuring that the descheduler has an up-to-date view of the cluster state.
## Pod Evictions
When the descheduler decides to evict pods from a node, it employs the following general mechanism:
* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
* Pods (static or mirrored pods or standalone pods) not part of a ReplicationController, ReplicaSet (Deployment), StatefulSet, or Job are
never evicted because these pods won't be recreated. (Standalone pods in failed status phase can be evicted by setting `evictFailedBarePods: true`)
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have the same priority,
best effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction (see the example below). This
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
Users should know how and if the pod will be recreated.
* Pods with a non-nil DeletionTimestamp are not evicted by default.
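For example, the following (a minimal sketch with a hypothetical pod name and image) marks a bare pod as evictable even though bare pods are normally skipped:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: evictable-pod                              # hypothetical name, for illustration
  annotations:
    descheduler.alpha.kubernetes.io/evict: "true"  # opts the pod in to descheduler eviction
spec:
  containers:
  - name: pause
    image: k8s.gcr.io/pause:3.5                    # illustrative image
```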
Setting `--v=4` or greater on the descheduler will log all reasons why any pod is not evictable.
### Pod Disruption Budget (PDB)
Pods subject to a Pod Disruption Budget (PDB) are not evicted if descheduling violates its PDB. The pods
are evicted by using the eviction subresource to handle PDB.
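For instance, a PodDisruptionBudget like the following sketch (hypothetical name; the label matches the label filtering example above) keeps the descheduler from evicting a matching pod whenever that would drop the number of available replicas below `minAvailable`:
```yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: redis-pdb          # hypothetical name
spec:
  minAvailable: 2
  selector:
    matchLabels:
      component: redis     # same label as in the label filtering example
```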
## High Availability
In High Availability mode, the descheduler starts a [leader election](https://github.com/kubernetes/client-go/tree/master/tools/leaderelection) process in Kubernetes. You can activate HA mode
if you choose to deploy the descheduler as a Deployment.
A Deployment starts with 1 replica by default. If you want to use more than 1 replica, you must
enable High Availability mode, since multiple descheduler replicas should not be descheduling simultaneously.
### Configure HA Mode
The leader election process can be enabled by setting `--leader-elect` in the CLI. You can also set
the `--set=leaderElection.enabled=true` flag if you are using Helm.
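For example, with Helm the relevant values could be overridden as follows (a sketch; the file and release names are illustrative, and all keys come from the chart's `values.yaml`):
```yaml
# ha-values.yaml: run the descheduler as a Deployment with leader election
kind: Deployment
replicas: 2
leaderElection:
  enabled: true
```
Install it with `helm install my-release --namespace kube-system descheduler/descheduler -f ha-values.yaml`.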
To get the best results from HA mode, some additional configuration might be required:
* Configure a [podAntiAffinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node) rule if you want to schedule onto a node only if that node is in the same zone as at least one already-running descheduler
* Set the replica count greater than 1
## Metrics
| name | type | description |
|-------|-------|----------------|
| build_info | gauge | constant 1 |
| pods_evicted | CounterVec | total number of pods evicted |
The metrics are served through https://localhost:10258/metrics by default.
The address and port can be changed by setting the `--bind-address` and `--secure-port` flags.
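For a quick manual check, you can port-forward to a running descheduler and scrape the endpoint (a sketch that assumes the descheduler runs as a Deployment named `descheduler` in `kube-system`):
```shell
# forward the secure metrics port locally
kubectl -n kube-system port-forward deployment/descheduler 10258:10258 &
# -k skips verification of the self-signed serving certificate
curl -k https://localhost:10258/metrics
```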
## Compatibility Matrix
The below compatibility matrix shows the k8s client package (client-go, apimachinery, etc.) versions that descheduler
is compiled with. At this time descheduler does not have a hard dependency on a specific k8s release. However, a
particular descheduler release is only tested against the three latest k8s minor versions. For example descheduler
v0.18 should work with k8s v1.18, v1.17, and v1.16.
Starting with descheduler release v0.18 the minor version of descheduler matches the minor version of the k8s client
packages that it is compiled with.
| Descheduler | Supported Kubernetes Version |
|-------------|------------------------------|
| v0.25 | v1.25 |
| v0.24 | v1.24 |
| v0.23 | v1.23 |
| v0.22 | v1.22 |
| v0.21 | v1.21 |
| v0.20 | v1.20 |
| v0.19 | v1.19 |
| v0.18 | v1.18 |
| v0.10 | v1.17 |
| v0.4-v0.9 | v1.9+ |
| v0.1-v0.3 | v1.7-v1.8 |
## Getting Involved and Contributing
Are you interested in contributing to descheduler? We, the
maintainers and community, would love your suggestions, contributions, and help!
Also, the maintainers can be contacted at any time to learn more about how to get
involved.
To get started writing code see the [contributor guide](docs/contributor-guide.md) in the `/docs` directory.
In the interest of getting more new people involved we tag issues with
[`good first issue`][good_first_issue].
These are typically issues that have smaller scope but are good ways to start
to get acquainted with the codebase.
We also encourage ALL active community participants to act as if they are
maintainers, even if you don't have "official" write permissions. This is a
community effort, we are here to serve the Kubernetes community. If you have an
active interest and you want to get involved, you have real power! Don't assume
that the only people who can get things done around here are the "maintainers".
We also would love to add more "official" maintainers, so show us what you can
do!
This repository uses the Kubernetes bots. See a full list of the commands [here][prow].
### Communicating With Contributors
You can reach the contributors of this project at:
- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)
Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
## Roadmap
This roadmap is not in any particular order.
* Consideration of pod affinity
* Strategy to consider pod life time
* Strategy to consider number of pending pods
* Integration with cluster autoscaler
* Integration with metrics providers for obtaining real load metrics
* Consideration of the Kubernetes scheduler's predicates
## Code of Conduct
Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,16 @@
apiVersion: v1
name: descheduler
version: 0.25.1
appVersion: 0.25.1
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes
- descheduler
- kube-scheduler
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
sources:
- https://github.com/kubernetes-sigs/descheduler
maintainers:
- name: Kubernetes SIG Scheduling
email: kubernetes-sig-scheduling@googlegroups.com

View File

@@ -0,0 +1,87 @@
# Descheduler for Kubernetes
[Descheduler](https://github.com/kubernetes-sigs/descheduler/) for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
## TL;DR:
```shell
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
helm install my-release --namespace kube-system descheduler/descheduler
```
## Introduction
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.14+
## Installing the Chart
To install the chart with the release name `my-release`:
```shell
helm install --namespace kube-system my-release descheduler/descheduler
```
The command deploys _descheduler_ on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```shell
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the _descheduler_ chart and their default values.
| Parameter | Description | Default |
|-------------------------------------|-----------------------------------------------------------------------------------------------------------------------|--------------------------------------|
| `kind` | Use as CronJob or Deployment | `CronJob` |
| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when they collide with target labels. | `true` |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
| `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
| `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |
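For example, individual parameters from the table above can be overridden at install time with `--set` (the values shown are illustrative):
```shell
helm install my-release --namespace kube-system descheduler/descheduler \
  --set schedule="*/10 * * * *" \
  --set cmdOptions.v=4
```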

View File

@@ -0,0 +1,7 @@
Descheduler installed as a {{ .Values.kind }}.
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.replicas 1.0}}
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
{{- end}}
{{- end}}

View File

@@ -0,0 +1,94 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "descheduler.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "descheduler.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "descheduler.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "descheduler.labels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
helm.sh/chart: {{ include "descheduler.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "descheduler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "descheduler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "descheduler.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Leader Election
*/}}
{{- define "descheduler.leaderElection"}}
{{- if .Values.leaderElection -}}
- --leader-elect={{ .Values.leaderElection.enabled }}
{{- if .Values.leaderElection.leaseDuration }}
- --leader-elect-lease-duration={{ .Values.leaderElection.leaseDuration }}
{{- end }}
{{- if .Values.leaderElection.renewDeadline }}
- --leader-elect-renew-deadline={{ .Values.leaderElection.renewDeadline }}
{{- end }}
{{- if .Values.leaderElection.retryPeriod }}
- --leader-elect-retry-period={{ .Values.leaderElection.retryPeriod }}
{{- end }}
{{- if .Values.leaderElection.resourceLock }}
- --leader-elect-resource-lock={{ .Values.leaderElection.resourceLock }}
{{- end }}
{{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }}
{{- if .Values.leaderElection.resourceNamescape }}
- --leader-elect-resource-namespace={{ .Values.leaderElection.resourceNamescape }}
{{- end -}}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,36 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "descheduler.fullname" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
{{- if .Values.leaderElection.enabled }}
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
verbs: ["get", "patch", "delete"]
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,16 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "descheduler.fullname" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "descheduler.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
data:
policy.yaml: |
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}

View File

@@ -0,0 +1,95 @@
{{- if eq .Values.kind "CronJob" }}
apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
schedule: {{ .Values.schedule | quote }}
{{- if .Values.suspend }}
suspend: {{ .Values.suspend }}
{{- end }}
concurrencyPolicy: "Forbid"
{{- if .Values.startingDeadlineSeconds }}
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
{{- end }}
{{- if .Values.successfulJobsHistoryLimit }}
successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
{{- end }}
{{- if .Values.failedJobsHistoryLimit }}
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
{{- end }}
jobTemplate:
spec:
template:
metadata:
name: {{ template "descheduler.fullname" . }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{- .Values.podAnnotations | toYaml | nindent 12 }}
{{- end }}
labels:
{{- include "descheduler.selectorLabels" . | nindent 12 }}
{{- if .Values.podLabels }}
{{- .Values.podLabels | toYaml | nindent 12 }}
{{- end }}
spec:
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
restartPolicy: "Never"
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 10 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
volumes:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- end }}

View File

@@ -0,0 +1,94 @@
{{- if eq .Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
{{- if gt .Values.replicas 1.0}}
{{- if not .Values.leaderElection.enabled }}
{{- fail "You must set leaderElection to use more than 1 replica"}}
{{- end}}
replicas: {{ required "leaderElection required for running more than one replica" .Values.replicas }}
{{- else }}
replicas: 1
{{- end }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "descheduler.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- .Values.podLabels | toYaml | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{- .Values.podAnnotations | toYaml | nindent 8 }}
{{- end }}
spec:
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 10 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
{{- end }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
ports:
- containerPort: 10258
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
volumes:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,21 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.service.enabled true }}
apiVersion: v1
kind: Service
metadata:
labels:
{{- include "descheduler.labels" . | nindent 4 }}
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
clusterIP: None
ports:
- name: http-metrics
port: 10258
protocol: TCP
targetPort: 10258
selector:
{{- include "descheduler.selectorLabels" . | nindent 4 }}
type: ClusterIP
{{- end }}
{{- end }}

View File

@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,41 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.serviceMonitor.enabled true }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "descheduler.fullname" . }}-servicemonitor
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
endpoints:
- honorLabels: {{ .Values.serviceMonitor.honorLabels | default true }}
port: http-metrics
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
scheme: https
tlsConfig:
{{- if eq .Values.serviceMonitor.insecureSkipVerify true }}
insecureSkipVerify: true
{{- end }}
{{- if .Values.serviceMonitor.serverName }}
serverName: {{ .Values.serviceMonitor.serverName }}
{{- end}}
{{- if .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | indent 4) . }}
{{- end }}
{{- if .Values.serviceMonitor.relabelings }}
relabelings:
{{ tpl (toYaml .Values.serviceMonitor.relabelings | indent 4) . }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,179 @@
# Default values for descheduler.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# CronJob or Deployment
kind: CronJob
image:
repository: k8s.gcr.io/descheduler/descheduler
# Overrides the image tag whose default is the chart version
tag: ""
pullPolicy: IfNotPresent
imagePullSecrets:
# - name: container-registry-secret
resources:
requests:
cpu: 500m
memory: 256Mi
# limits:
# cpu: 100m
# memory: 128Mi
nameOverride: ""
fullnameOverride: ""
# labels that'll be applied to all resources
commonLabels: {}
cronJobApiVersion: "batch/v1"
schedule: "*/2 * * * *"
suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 1
# failedJobsHistoryLimit: 1
# Required when running as a Deployment
deschedulingInterval: 5m
# Specifies the replica count for Deployment
# Set leaderElection if you want to use more than 1 replica
# Set affinity.podAntiAffinity rule if you want to schedule onto a node
# only if that node is in the same zone as at least one already-running descheduler
replicas: 1
# Specifies whether Leader Election resources should be created
# Required when running as a Deployment
leaderElection: {}
# enabled: true
# leaseDuration: 15s
# renewDeadline: 10s
# retryPeriod: 2s
# resourceLock: "leases"
# resourceName: "descheduler"
# resourceNamescape: "kube-system"
cmdOptions:
v: 3
deschedulerPolicy:
# nodeSelector: "key1=value1,key2=value2"
# maxNoOfPodsToEvictPerNode: 10
# maxNoOfPodsToEvictPerNamespace: 10
# ignorePvcPods: true
# evictLocalStoragePods: true
strategies:
RemoveDuplicates:
enabled: true
RemovePodsHavingTooManyRestarts:
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
RemovePodsViolatingNodeTaints:
enabled: true
RemovePodsViolatingNodeAffinity:
enabled: true
params:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
RemovePodsViolatingInterPodAntiAffinity:
enabled: true
RemovePodsViolatingTopologySpreadConstraint:
enabled: true
params:
includeSoftConstraints: false
LowNodeUtilization:
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
cpu: 20
memory: 20
pods: 20
targetThresholds:
cpu: 50
memory: 50
pods: 50
priorityClassName: system-cluster-critical
nodeSelector: {}
# foo: bar
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - descheduler
# topologyKey: "kubernetes.io/hostname"
tolerations: []
# - key: 'management'
# operator: 'Equal'
# value: 'tool'
# effect: 'NoSchedule'
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Specifies custom annotations for the serviceAccount
annotations: {}
podAnnotations: {}
podLabels: {}
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
service:
enabled: false
serviceMonitor:
enabled: false
# The namespace where Prometheus expects to find service monitors.
# namespace: ""
interval: ""
# honorLabels: true
insecureSkipVerify: true
serverName: null
metricRelabelings: []
# - action: keep
# regex: 'descheduler_(build_info|pods_evicted)'
# sourceLabels: [__name__]
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace

View File

@@ -1,20 +1,20 @@
# See https://cloud.google.com/cloud-build/docs/build-config
# this must be specified in seconds. If omitted, defaults to 600s (10 mins)
timeout: 1200s
timeout: 1500s
# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
# or any new substitutions added in the future.
options:
substitution_option: ALLOW_LOOSE
steps:
- name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4'
- name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211118-2f2d816b90'
entrypoint: make
env:
- DOCKER_CLI_EXPERIMENTAL=enabled
- VERSION=$_GIT_TAG
- BASE_REF=$_PULL_BASE_REF
args:
- push
- push-all
substitutions:
# _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
# can be used as a substitution

View File

@@ -18,44 +18,79 @@ limitations under the License.
package options
import (
clientset "k8s.io/client-go/kubernetes"
"time"
// install the componentconfig api so we get its defaulting and conversion functions
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiserveroptions "k8s.io/apiserver/pkg/server/options"
clientset "k8s.io/client-go/kubernetes"
componentbaseconfig "k8s.io/component-base/config"
componentbaseoptions "k8s.io/component-base/config/options"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
)
"github.com/spf13/pflag"
const (
DefaultDeschedulerPort = 10258
)
// DeschedulerServer configuration
type DeschedulerServer struct {
componentconfig.DeschedulerConfiguration
Client clientset.Interface
Client clientset.Interface
EventClient clientset.Interface
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
DisableMetrics bool
}
// NewDeschedulerServer creates a new DeschedulerServer with default parameters
func NewDeschedulerServer() *DeschedulerServer {
versioned := v1alpha1.DeschedulerConfiguration{}
deschedulerscheme.Scheme.Default(&versioned)
cfg := componentconfig.DeschedulerConfiguration{}
deschedulerscheme.Scheme.Convert(versioned, &cfg, nil)
s := DeschedulerServer{
DeschedulerConfiguration: cfg,
func NewDeschedulerServer() (*DeschedulerServer, error) {
cfg, err := newDefaultComponentConfig()
if err != nil {
return nil, err
}
return &s
secureServing := apiserveroptions.NewSecureServingOptions().WithLoopback()
secureServing.BindPort = DefaultDeschedulerPort
return &DeschedulerServer{
DeschedulerConfiguration: *cfg,
SecureServing: secureServing,
}, nil
}
func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
versionedCfg := v1alpha1.DeschedulerConfiguration{
LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
LeaderElect: false,
LeaseDuration: metav1.Duration{Duration: 137 * time.Second},
RenewDeadline: metav1.Duration{Duration: 107 * time.Second},
RetryPeriod: metav1.Duration{Duration: 26 * time.Second},
ResourceLock: "leases",
ResourceName: "descheduler",
ResourceNamespace: "kube-system",
},
}
deschedulerscheme.Scheme.Default(&versionedCfg)
cfg := componentconfig.DeschedulerConfiguration{}
if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
return nil, err
}
return &cfg, nil
}
// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "Enables evicting pods using local storage by descheduler")
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
rs.SecureServing.AddFlags(fs)
}

View File

@@ -18,44 +18,92 @@ limitations under the License.
package app
import (
"flag"
"context"
"io"
"os/signal"
"syscall"
"k8s.io/apiserver/pkg/server/healthz"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
"github.com/spf13/cobra"
aflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
"k8s.io/klog"
apiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
restclient "k8s.io/client-go/rest"
registry "k8s.io/component-base/logs/api/v1"
_ "k8s.io/component-base/logs/json/register"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
)
// NewDeschedulerCommand creates a *cobra.Command object with default parameters
func NewDeschedulerCommand(out io.Writer) *cobra.Command {
s := options.NewDeschedulerServer()
s, err := options.NewDeschedulerServer()
if err != nil {
klog.ErrorS(err, "unable to initialize server")
}
cmd := &cobra.Command{
Use: "descheduler",
Short: "descheduler",
Long: `The descheduler evicts pods which may be bound to less desired nodes`,
Run: func(cmd *cobra.Command, args []string) {
logs.InitLogs()
defer logs.FlushLogs()
err := Run(s)
if err != nil {
klog.Errorf("%v", err)
// s.Logs.Config.Format = s.Logging.Format
// LoopbackClientConfig is a config for a privileged loopback connection
var LoopbackClientConfig *restclient.Config
var SecureServing *apiserver.SecureServingInfo
if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return
}
var factory registry.LogFormatFactory
if factory == nil {
klog.ClearLogger()
} else {
log, logrFlush := factory.Create(registry.LoggingConfiguration{
Format: s.Logging.Format,
})
defer logrFlush()
klog.SetLogger(log)
}
ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
if !s.DisableMetrics {
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
}
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
stoppedCh, _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return
}
err = Run(ctx, s)
if err != nil {
klog.ErrorS(err, "descheduler server")
}
done()
// wait for metrics server to close
<-stoppedCh
},
}
cmd.SetOutput(out)
cmd.SetOut(out)
flags := cmd.Flags()
flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
flags.AddGoFlagSet(flag.CommandLine)
s.AddFlags(flags)
return cmd
}
func Run(rs *options.DeschedulerServer) error {
return descheduler.Run(rs)
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return descheduler.Run(ctx, rs)
}

View File

@@ -18,70 +18,19 @@ package app
import (
"fmt"
"runtime"
"strings"
"github.com/spf13/cobra"
"sigs.k8s.io/descheduler/pkg/version"
)
var (
// gitCommit is a constant representing the source version that
// generated this build. It should be set during build via -ldflags.
gitCommit string
// version is a constant representing the version tag that
// generated this build. It should be set during build via -ldflags.
version string
// buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
//It should be set during build via -ldflags.
buildDate string
)
// Info holds the information related to descheduler app version.
type Info struct {
Major string `json:"major"`
Minor string `json:"minor"`
GitCommit string `json:"gitCommit"`
GitVersion string `json:"gitVersion"`
BuildDate string `json:"buildDate"`
GoVersion string `json:"goVersion"`
Compiler string `json:"compiler"`
Platform string `json:"platform"`
}
// Get returns the overall codebase version. It's for detecting
// what code a binary was built from.
func Get() Info {
majorVersion, minorVersion := splitVersion(version)
return Info{
Major: majorVersion,
Minor: minorVersion,
GitCommit: gitCommit,
GitVersion: version,
BuildDate: buildDate,
GoVersion: runtime.Version(),
Compiler: runtime.Compiler,
Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
}
}
func NewVersionCommand() *cobra.Command {
var versionCmd = &cobra.Command{
Use: "version",
Short: "Version of descheduler",
Long: `Prints the version of descheduler.`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Descheduler version %+v\n", Get())
fmt.Printf("Descheduler version %+v\n", version.Get())
},
}
return versionCmd
}
// splitVersion splits the git version to generate major and minor versions needed.
func splitVersion(version string) (string, string) {
if version == "" {
return "", ""
}
// A sample version would be of form v0.1.0-7-ge884046, so split at first '.' and
// then return 0 and 1+(+ appended to follow semver convention) for major and minor versions.
return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
}

View File

@@ -17,20 +17,23 @@ limitations under the License.
package main
import (
"flag"
"fmt"
"os"
"k8s.io/component-base/cli"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/cmd/descheduler/app"
)
func init() {
klog.SetOutput(os.Stdout)
klog.InitFlags(nil)
}
func main() {
out := os.Stdout
cmd := app.NewDeschedulerCommand(out)
cmd.AddCommand(app.NewVersionCommand())
flag.CommandLine.Parse([]string{})
if err := cmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
code := cli.Run(cmd)
os.Exit(code)
}

5
docs/README.md Normal file
View File

@@ -0,0 +1,5 @@
# Documentation Index
- [Contributor Guide](contributor-guide.md)
- [Release Guide](release-guide.md)
- [User Guide](user-guide.md)

70
docs/contributor-guide.md Normal file
View File

@@ -0,0 +1,70 @@
# Contributor Guide
## Required Tools
- [Git](https://git-scm.com/downloads)
- [Go 1.16+](https://golang.org/dl/)
- [Docker](https://docs.docker.com/install/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
- [kind v0.10.0+](https://kind.sigs.k8s.io/)
## Build and Run
Build descheduler.
```sh
cd $GOPATH/src/sigs.k8s.io
git clone https://github.com/kubernetes-sigs/descheduler.git
cd descheduler
make
```
Run descheduler.
```sh
./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
```
View all CLI options.
```
./_output/bin/descheduler --help
```
## Run Tests
```
GOOS=linux make dev-image
make kind-multi-node
kind load docker-image <image name>
kind get kubeconfig > /tmp/admin.conf
export KUBECONFIG=/tmp/admin.conf
make test-unit
make test-e2e
```
## Build Helm Package locally
If you made some changes in the chart, and just want to check if templating is ok, or if the chart is buildable, you can run this command to have a package built from the `./charts` directory.
```
make build-helm
```
## Lint Helm Chart locally
To check linting of your changes in the helm chart locally you can run:
```
make lint-helm
```
## Test helm changes locally with kind and ct
You will need kind and docker (or equivalent) installed. We can use the public ct image to avoid installing ct and all of its dependencies.
```
make kind-multi-node
make ct-helm
```
### Miscellaneous
See the [hack directory](https://github.com/kubernetes-sigs/descheduler/tree/master/hack) for additional tools and scripts used for developing the descheduler.

16
docs/proposals.md Normal file
View File

@@ -0,0 +1,16 @@
# Proposals
This document walks you through all the enhancement proposals for descheduler.
## Descheduler v1alpha2 Design Proposal
```yaml
title: Descheduler v1alpha2 Design Proposal
authors:
- "@damemi"
link:
- https://docs.google.com/document/d/1S1JCh-0F-QCJvBBG-kbmXiHAJFF8doArhDIAKbOj93I/edit#heading=h.imbp1ctnc8lx
- https://github.com/kubernetes-sigs/descheduler/issues/679
owning-sig: sig-scheduling
creation-date: 2021-05-01
status: implementable
```

104
docs/release-guide.md Normal file
View File

@@ -0,0 +1,104 @@
# Release Guide
The process for publishing each Descheduler release includes a mixture of manual and automatic steps. Over
time, it would be good to automate as much of this process as possible. However, due to current limitations, care
must be taken to perform each manual step precisely so that the automated steps execute properly.
## Pre-release Code Changes
Before publishing each release, the following code updates must be made:
- [ ] (Optional, but recommended) Bump `k8s.io` dependencies to the `-rc` tags. These tags are usually published around upstream code freeze. [Example](https://github.com/kubernetes-sigs/descheduler/pull/539)
- [ ] Bump `k8s.io` dependencies to GA tags once they are published (following the upstream release). [Example](https://github.com/kubernetes-sigs/descheduler/pull/615)
- [ ] Ensure that Go is updated to the same version as upstream. [Example](https://github.com/kubernetes-sigs/descheduler/pull/801)
- [ ] Make CI changes in [github.com/kubernetes/test-infra](https://github.com/kubernetes/test-infra) to add the new version's tests (note, this may also include a Go bump). [Example](https://github.com/kubernetes/test-infra/pull/25833)
- [ ] Update local CI versions for utils (such as golang-ci), kind, and go. [Example - e2e](https://github.com/kubernetes-sigs/descheduler/commit/ac4d576df8831c0c399ee8fff1e85469e90b8c44), [Example - helm](https://github.com/kubernetes-sigs/descheduler/pull/821)
- [ ] Update version references in docs and Readme. [Example](https://github.com/kubernetes-sigs/descheduler/pull/617)
## Release Process
When the above pre-release steps are complete and the release is ready to be cut, perform the following steps **in order**
(the flowchart below demonstrates these steps):
**Version release**
1. Create the `git tag` on `master` for the release, eg `v0.24.0`
2. Merge Helm chart version update to `master` (see [Helm chart](#helm-chart) below). [Example](https://github.com/kubernetes-sigs/descheduler/pull/709)
3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
4. Cut release branch from `master`, eg `release-1.24`
5. Publish release using Github's release process from the git tag you created
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
**Patch release**
1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
2. Create the patch tag on the release branch, eg `v0.24.1` on `release-1.24`
3. Merge Helm chart version update to release branch
4. Perform the image promotion process for the patch version
5. Publish release using Github's release process from the git tag you created
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
### Flowchart
![Flowchart for major and patch releases](release-process.png)
### Image promotion process
Every merge to any branch triggers an [image build and push](https://github.com/kubernetes/test-infra/blob/c36b8e5/config/jobs/image-pushing/k8s-staging-descheduler.yaml) to a `gcr.io` repository.
These automated image builds are snapshots of the code in place at the time of every PR merge and
tagged with the latest git SHA at the time of the build. To create a final release image, the desired
auto-built image SHA is added to a [file upstream](https://github.com/kubernetes/k8s.io/blob/e9e971c/k8s.gcr.io/images/k8s-staging-descheduler/images.yaml) which
copies that image to a public registry.
Automatic builds can be monitored and re-triggered with the [`post-descheduler-push-images` job](https://prow.k8s.io/?job=post-descheduler-push-images) on prow.k8s.io.
Note that images can also be manually built and pushed using `VERSION=$VERSION make push-all` by [users with access](https://github.com/kubernetes/k8s.io/blob/fbee8f67b70304241e613a672c625ad972998ad7/groups/sig-scheduling/groups.yaml#L33-L43).
## Helm Chart
We currently use the [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action) to automatically
publish [Helm chart releases](https://github.com/kubernetes-sigs/descheduler/blob/022e07c/.github/workflows/release.yaml).
This action is triggered when it detects any changes to [`Chart.yaml`](https://github.com/kubernetes-sigs/descheduler/blob/022e07c27853fade6d1304adc0a6ebe02642386c/charts/descheduler/Chart.yaml) on
a `release-*` branch.
Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. Example git tag name is `descheduler-helm-chart-0.18.0`.
Released versions of the helm charts are stored in the `gh-pages` branch of this repo.
The major and minor version of the chart matches the descheduler major and minor versions. For example descheduler helm chart version descheduler-helm-chart-0.18.0 corresponds
to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match. The patch
version of the descheduler helm chart is used to version changes specific to the helm chart.
1. Merge all helm chart changes into the master branch before the release is tagged/cut
   1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix), see the sketch below
   2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
2. Make sure your repo is clean by git's standards
3. Follow the release-branch or patch release tagging pattern from the above section.
4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
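For a hypothetical v0.26.0 release, the relevant `Chart.yaml` fields would look like this sketch:
```yaml
# charts/descheduler/Chart.yaml (excerpt)
version: 0.26.0    # chart version, incremented for each chart release
appVersion: 0.26.0 # matches the descheduler release, without the "v" prefix
```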
## Notes
The Helm releaser-action compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
will be nothing to compare and it will fail. This is why it's necessary to tag, eg, `v0.24.0` *before* making the changes to the
Helm chart version, so that there is a new diff for the action to find. (Tagging *after* making the Helm chart changes would
also work, but then the code that gets built into the promoted image will be tagged as `descheduler-helm-chart-xxx` rather than `v0.xx.0`).
See [post-descheduler-push-images dashboard](https://testgrid.k8s.io/sig-scheduling#post-descheduler-push-images) for staging registry image build job status.
View the descheduler staging registry using [this URL](https://console.cloud.google.com/gcr/images/k8s-staging-descheduler/GLOBAL/descheduler) in a web browser
or use the below `gcloud` commands.
List images in staging registry.
```
gcloud container images list --repository gcr.io/k8s-staging-descheduler
```
List descheduler image tags in the staging registry.
```
gcloud container images list-tags gcr.io/k8s-staging-descheduler/descheduler
```
Get SHA256 hash for a specific image in the staging registry.
```
gcloud container images describe gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
```
Pull image from the staging registry.
```
docker pull gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
```

BIN
docs/release-process.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 121 KiB

View File

@@ -1,21 +0,0 @@
# Release process
## Semi-automatic
1. Make sure your repo is clean by git's standards
2. Tag the repository and push the tag `VERSION=v0.10.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
3. Publish a draft release using the tag you just created
4. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
5. Publish release
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
## Manual
1. Make sure your repo is clean by git's standards
2. Tag the repository and push the tag `VERSION=v0.10.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
3. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
4. Build and push the container image to the staging registry `VERSION=$VERSION make push`
5. Publish a draft release using the tag you just created
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
7. Publish release
8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release

176
docs/user-guide.md Normal file
View File

@@ -0,0 +1,176 @@
# User Guide
Starting with descheduler release v0.10.0 container images are available in the official k8s container registry.
Descheduler Version | Container Image | Architectures |
------------------- |--------------------------------------------|-------------------------|
v0.25.1 | k8s.gcr.io/descheduler/descheduler:v0.25.1 | AMD64<br>ARM64<br>ARMv7 |
v0.25.0 | k8s.gcr.io/descheduler/descheduler:v0.25.0 | AMD64<br>ARM64<br>ARMv7 |
v0.24.1 | k8s.gcr.io/descheduler/descheduler:v0.24.1 | AMD64<br>ARM64<br>ARMv7 |
v0.24.0 | k8s.gcr.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
v0.23.1 | k8s.gcr.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
v0.19.0 | k8s.gcr.io/descheduler/descheduler:v0.19.0 | AMD64 |
v0.18.0 | k8s.gcr.io/descheduler/descheduler:v0.18.0 | AMD64 |
v0.10.0 | k8s.gcr.io/descheduler/descheduler:v0.10.0 | AMD64 |
Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore,
starting with descheduler release v0.20.0, use the process below to pull and load the official descheduler
image into a kind cluster.
```
kind create cluster
docker pull k8s.gcr.io/descheduler/descheduler:v0.20.0
kind load docker-image k8s.gcr.io/descheduler/descheduler:v0.20.0
```
## Policy Configuration Examples
The [examples](https://github.com/kubernetes-sigs/descheduler/tree/master/examples) directory has descheduler policy configuration examples.
## CLI Options
The descheduler has many CLI options that can be used to override its default behavior.
```
descheduler --help
The descheduler evicts pods which may be bound to less desired nodes
Usage:
descheduler [flags]
descheduler [command]
Available Commands:
completion generate the autocompletion script for the specified shell
help Help about any command
version Version of descheduler
Flags:
--add-dir-header If true, adds the file directory to the header of the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--alsologtostderr log to standard error as well as files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0)
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
--dry-run execute descheduler in dry run mode.
-h, --help help for descheduler
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kubeconfig string File with kube configuration.
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 15s)
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled. (default 10s)
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'endpoints', 'configmaps', 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 2s)
--log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log_dir string If non-empty, write log files in this directory (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log_file string If non-empty, use this log file (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
--logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
--logtostderr log to standard error instead of files (default true) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--one-output If true, only write logs to their native severity level (vs also writing to each lower severity level) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
--permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
--policy-config-file string File with descheduler policy configuration.
--secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
--skip-headers If true, avoid header prefixes in the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--skip-log-headers If true, avoid headers when opening log files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--stderrthreshold severity logs at or above this threshold go to stderr (default 2) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
Use "descheduler [command] --help" for more information about a command.
```
## Production Use Cases
This section contains descriptions of real world production use cases.
### Balance Cluster By Pod Age
When initially migrating applications from a static virtual machine infrastructure to a cloud native k8s
infrastructure, there can be a tendency to treat application pods like static virtual machines. One approach
to help prevent developers and operators from treating pods like virtual machines is to ensure that pods
only run for a fixed amount of time.
The `PodLifeTime` strategy can be used to ensure that old pods are evicted. It is recommended to create a
[pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for each
application to ensure application availability.
```
descheduler -v=3 --evict-local-storage-pods --policy-config-file=pod-life-time.yml
```
This policy configuration file ensures that pods created more than 7 days ago are evicted.
```
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
```
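As recommended above, pairing this policy with a PodDisruptionBudget keeps evictions from taking down an entire application at once. A minimal sketch, assuming a hypothetical application labeled `app: my-app` (adjust the name, selector, and budget to your workload):
```
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: my-app-pdb        # hypothetical name
spec:
  maxUnavailable: 1       # allow at most one pod of the app to be disrupted at a time
  selector:
    matchLabels:
      app: my-app         # assumed application label
```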
### Balance Cluster By Node Memory Utilization
If your cluster has been running for a long period of time, you may find that the resource utilization is not very
balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory`
or `number of pods`.
#### Balance high utilization nodes
Using `LowNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
from nodes with memory utilization over 70% to nodes with memory utilization below 20%.
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
targetThresholds:
"memory": 70
```
#### Balance low utilization nodes
Using `HighNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
from nodes with memory utilization lower than 20%. This strategy should be used together with the scheduler's `NodeResourcesFit` plugin and its `MostAllocated` scoring strategy, as described in the [scheduler configuration docs](https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins).
The evicted pods will then be compacted onto a minimal set of nodes.
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
```
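For reference, a minimal sketch of the scheduler-side configuration this strategy is meant to pair with, enabling the `MostAllocated` scoring strategy for the `NodeResourcesFit` plugin (the profile layout and resource weights shown here are assumptions; see the linked scheduler docs for the authoritative format):
```
apiVersion: kubescheduler.config.k8s.io/v1beta3
kind: KubeSchedulerConfiguration
profiles:
- pluginConfig:
  - name: NodeResourcesFit
    args:
      scoringStrategy:
        type: MostAllocated   # prefer nodes with higher allocation, so freed pods bin-pack
        resources:
        - name: cpu
          weight: 1           # assumed weight
        - name: memory
          weight: 1           # assumed weight
```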
### Autoheal Node Problems
Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
[Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove
Nodes which have problems. Node Problem Detector can detect specific Node problems and report them to the API server.
The node controller has a feature called TaintNodeByCondition that turns some node conditions into taints. Currently, this only works for the default node conditions (PIDPressure, MemoryPressure, DiskPressure, Ready) and some cloud provider specific conditions.
The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
allocation falls below the Cluster Autoscaler's scale-down threshold, the Node becomes a scale-down candidate
and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems.
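A minimal policy sketch enabling the strategy described above (the `excludedTaints` entry is an illustrative assumption showing how to skip taints that should not trigger eviction):
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
    params:
      excludedTaints:
      - "dedicated=special-user" # assumed example: ignore this taint key/value pair
```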
---
**NOTE**
Once [kubernetes/node-problem-detector#565](https://github.com/kubernetes/node-problem-detector/pull/565) is available in NPD, we need to update this section.
---

examples/failed-pods.yaml Normal file

@@ -0,0 +1,14 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "OutOfcpu"
- "CreateContainerConfigError"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600 # 1 hour


@@ -0,0 +1,9 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20


@@ -0,0 +1,11 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
targetThresholds:
"memory": 70


@@ -0,0 +1,11 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 604800 # 7 days
states:
- "Pending"
- "PodInitializing"


@@ -17,3 +17,13 @@ strategies:
"cpu" : 50
"memory": 50
"pods": 50
"RemovePodsHavingTooManyRestarts":
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: true


@@ -0,0 +1,8 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
nodeFit: true
includeSoftConstraints: true # Include 'ScheduleAnyways' constraints

go.mod

@@ -1,14 +1,115 @@
module sigs.k8s.io/descheduler
go 1.13
go 1.19
require (
github.com/spf13/cobra v0.0.5
github.com/client9/misspell v0.3.4
github.com/spf13/cobra v1.4.0
github.com/spf13/pflag v1.0.5
k8s.io/api v0.17.0
k8s.io/apimachinery v0.17.3-beta.0
k8s.io/apiserver v0.17.0
k8s.io/client-go v0.17.0
k8s.io/component-base v0.17.0
k8s.io/klog v1.0.0
k8s.io/api v0.25.0
k8s.io/apimachinery v0.25.0
k8s.io/apiserver v0.25.0
k8s.io/client-go v0.25.0
k8s.io/code-generator v0.25.0
k8s.io/component-base v0.25.0
k8s.io/component-helpers v0.25.0
k8s.io/klog/v2 v2.70.1
k8s.io/utils v0.0.0-20220823124924-e9cbc92d1a73
sigs.k8s.io/mdtoc v1.0.1
)
require (
cloud.google.com/go v0.97.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.1 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/zapr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
go.etcd.io/etcd/api/v3 v3.5.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect
go.etcd.io/etcd/client/v3 v3.5.4 // indirect
go.opentelemetry.io/contrib v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
go.opentelemetry.io/otel v0.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
go.opentelemetry.io/otel/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
go.opentelemetry.io/otel/trace v0.20.0 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.1.12 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

go.sum
File diff suppressed because it is too large

hack/.spelling_failures Normal file

@@ -0,0 +1,6 @@
BUILD
CHANGELOG
OWNERS
go.mod
go.sum
vendor/


@@ -18,15 +18,15 @@ E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
create_cluster() {
echo "#################### Creating instances ##########################"
gcloud compute instances create descheduler-$master_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
gcloud compute instances create descheduler-$master_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
# Keeping the --zone here so as to make sure that e2e's can run locally.
echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node1_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
gcloud compute instances create descheduler-$node1_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node2_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node2_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-c --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
# Delete the firewall port created for master.
echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
@@ -44,10 +44,10 @@ generate_kubeadm_instance_files() {
transfer_install_files() {
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
}
@@ -55,19 +55,19 @@ install_kube() {
# Docker installation.
gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-c
# kubeadm installation.
# 1. Transfer files to master, nodes.
transfer_install_files
# 2. Install kubeadm.
#TODO: Add rm /tmp/kubeadm_install.sh
# Open port for kube API server
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b|grep 'kubeadm join')
# Copy the kubeconfig file onto /tmp for e2e tests.
# Copy the kubeconfig file onto /tmp for e2e tests.
gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b
gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b
@@ -75,16 +75,15 @@ install_kube() {
gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b
echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh
# Copy kubeadm_join to every node.
# Copy kubeadm_join to every node.
#TODO: Put these in a loop, so that extension becomes possible.
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-c
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-c
}


@@ -1,6 +1,18 @@
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "topology.kubernetes.io/zone=local-a"
- role: worker
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "topology.kubernetes.io/zone=local-b"

hack/tools.go Normal file

@@ -0,0 +1,27 @@
//go:build tools
// +build tools
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This package imports things required by build scripts, to force `go mod` to see them as dependencies
package tools
import (
_ "github.com/client9/misspell/cmd/misspell"
_ "k8s.io/code-generator"
_ "sigs.k8s.io/mdtoc"
)


@@ -5,6 +5,6 @@ go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepc
${OS_OUTPUT_BINPATH}/deepcopy-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig,${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig,${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api,${PRJ_PREFIX}/pkg/api/v1alpha1,${PRJ_PREFIX}/pkg/framework/plugins/defaultevictor/" \
--output-file-base zz_generated.deepcopy


@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi

hack/update-toc.sh Executable file

@@ -0,0 +1,25 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
${OS_OUTPUT_BINPATH}/mdtoc --inplace README.md

hack/update-vendor.sh Executable file

@@ -0,0 +1,21 @@
#!/bin/bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go mod tidy
go mod vendor

hack/verify-chart.sh Executable file

@@ -0,0 +1 @@
${CONTAINER_ENGINE:-docker} run -it --rm --network host --workdir=/data --volume ~/.kube/config:/root/.kube/config:ro --volume $(pwd):/data quay.io/helmpack/chart-testing:v3.7.0 /bin/bash -c "git config --global --add safe.directory /data; ct install --config=.github/ci/ct.yaml --helm-extra-set-args=\"--set=kind=Deployment\""

hack/verify-conversions.sh Executable file

@@ -0,0 +1,36 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
_deschedulertmp="${_tmpdir}"
mkdir -p "${_deschedulertmp}"
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
${OS_OUTPUT_BINPATH}/conversion-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "./pkg/apis/componentconfig/v1alpha1,./pkg/api/v1alpha1" \
--output-file-base zz_generated.conversion
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated output differs:" >&2
echo "${_out}" >&2
echo "Generated conversions verify failed. Please run ./hack/update-generated-conversions.sh"
exit 1
fi
popd > /dev/null 2>&1
echo "Generated conversions verified."

hack/verify-deep-copies.sh Executable file

@@ -0,0 +1,36 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
_deschedulertmp="${_tmpdir}"
mkdir -p "${_deschedulertmp}"
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
${OS_OUTPUT_BINPATH}/deepcopy-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "./pkg/apis/componentconfig,./pkg/apis/componentconfig/v1alpha1,./pkg/api,./pkg/api/v1alpha1,./pkg/framework/plugins/defaultevictor/" \
--output-file-base zz_generated.deepcopy
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated deep-copies output differs:" >&2
echo "${_out}" >&2
echo "Generated deep-copies verify failed. Please run ./hack/update-generated-deep-copies.sh"
exit 1
fi
popd > /dev/null 2>&1
echo "Generated deep-copies verified."

hack/verify-defaulters.sh Executable file

@@ -0,0 +1,35 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
_deschedulertmp="${_tmpdir}"
mkdir -p "${_deschedulertmp}"
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
${OS_OUTPUT_BINPATH}/defaulter-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.defaults
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated defaulters output differs:" >&2
echo "${_out}" >&2
echo "Generated defaulters verify failed. Please run ./hack/update-generated-defaulters.sh"
fi
popd > /dev/null 2>&1
echo "Generated Defaulters verified."


@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi

hack/verify-govet.sh Executable file

@@ -0,0 +1,19 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go vet ${OS_ROOT}/...

hack/verify-spelling.sh Executable file

@@ -0,0 +1,41 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script checks commonly misspelled English words in all files in the
# working directory by client9/misspell package.
# Usage: `hack/verify-spelling.sh`.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
export KUBE_ROOT
source "${KUBE_ROOT}/hack/lib/init.sh"
# Ensure that we find the binaries we build before anything else.
export GOBIN="${OS_OUTPUT_BINPATH}"
PATH="${GOBIN}:${PATH}"
# Install tools we need
pushd "${KUBE_ROOT}" >/dev/null
GO111MODULE=on go install github.com/client9/misspell/cmd/misspell
popd >/dev/null
# Spell checking
# All the skipping files are defined in hack/.spelling_failures
skipping_file="${KUBE_ROOT}/hack/.spelling_failures"
failing_packages=$(sed "s| | -e |g" "${skipping_file}")
git ls-files | grep -v -e "${failing_packages}" | xargs misspell -i "Creater,creater,ect" -error -o stderr

hack/verify-toc.sh Executable file

@@ -0,0 +1,29 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
if ! ${OS_OUTPUT_BINPATH}/mdtoc --inplace --dryrun README.md
then
echo "ERROR: Changes detected to table of contents. Run ./hack/update-toc.sh" >&2
exit 1
fi

hack/verify-vendor.sh Executable file

@@ -0,0 +1,88 @@
#!/bin/bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is mostly copied from the hack/verify-vendor.sh script located in k8s.io/kubernetes
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-vendor.XXXXXX")"
if [[ -z ${KEEP_TMP:-} ]]; then
KEEP_TMP=false
fi
function cleanup {
# make go module dirs writeable
chmod -R +w "${_tmpdir}"
if [ "${KEEP_TMP}" == "true" ]; then
echo "Leaving ${_tmpdir} for you to examine or copy. Please delete it manually when finished. (rm -rf ${_tmpdir})"
else
echo "Removing ${_tmpdir}"
rm -rf "${_tmpdir}"
fi
}
trap "cleanup" EXIT
_deschedulertmp="${_tmpdir}"
mkdir -p "${_deschedulertmp}"
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
# Destroy deps in the copy of the kube tree
rm -rf ./vendor
# Recreate the vendor tree using the nice clean set we just downloaded
hack/update-vendor.sh
popd > /dev/null 2>&1
ret=0
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
# Test for diffs
if ! _out="$(diff -Naupr --ignore-matching-lines='^\s*\"GoVersion\":' go.mod "${_deschedulertmp}/go.mod")"; then
echo "Your go.mod file is different:" >&2
echo "${_out}" >&2
echo "Vendor Verify failed." >&2
echo "If you're seeing this locally, run the below command to fix your go.mod:" >&2
echo "hack/update-vendor.sh" >&2
ret=1
fi
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
echo "Your vendored results are different:" >&2
echo "${_out}" >&2
echo "Vendor Verify failed." >&2
echo "${_out}" > vendordiff.patch
echo "If you're seeing this locally, run the below command to fix your directories:" >&2
echo "hack/update-vendor.sh" >&2
ret=1
fi
popd > /dev/null 2>&1
if [[ ${ret} -gt 0 ]]; then
exit ${ret}
fi
echo "Vendor Verified."


@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- configmap.yaml
- rbac.yaml

kubernetes/base/rbac.yaml Normal file

@@ -0,0 +1,50 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: descheduler-cluster-role
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["descheduler"]
verbs: ["get", "patch", "delete"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: descheduler-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: descheduler-cluster-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: descheduler-cluster-role
subjects:
- name: descheduler-sa
kind: ServiceAccount
namespace: kube-system


@@ -1,35 +0,0 @@
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: descheduler-cronjob
namespace: kube-system
spec:
schedule: "*/2 * * * *"
concurrencyPolicy: "Forbid"
jobTemplate:
spec:
template:
metadata:
name: descheduler-pod
spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: us.gcr.io/k8s-artifacts-prod/descheduler:v0.10.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--v"
- "3"
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap


@@ -0,0 +1,55 @@
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: descheduler-cronjob
namespace: kube-system
spec:
schedule: "*/2 * * * *"
concurrencyPolicy: "Forbid"
jobTemplate:
spec:
template:
metadata:
name: descheduler-pod
spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.25.1
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--v"
- "3"
resources:
requests:
cpu: "500m"
memory: "256Mi"
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap


@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../base
- cronjob.yaml


@@ -0,0 +1,62 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: descheduler
namespace: kube-system
labels:
app: descheduler
spec:
replicas: 1
selector:
matchLabels:
app: descheduler
template:
metadata:
labels:
app: descheduler
spec:
priorityClassName: system-cluster-critical
serviceAccountName: descheduler-sa
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.25.1
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- "5m"
- "--v"
- "3"
ports:
- containerPort: 10258
protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
resources:
requests:
cpu: 500m
memory: 256Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap


@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../base
- deployment.yaml


@@ -1,33 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: descheduler-job
namespace: kube-system
spec:
parallelism: 1
completions: 1
template:
metadata:
name: descheduler-pod
spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: us.gcr.io/k8s-artifacts-prod/descheduler:v0.10.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--v"
- "3"
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap

kubernetes/job/job.yaml Normal file

@@ -0,0 +1,53 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: descheduler-job
namespace: kube-system
spec:
parallelism: 1
completions: 1
template:
metadata:
name: descheduler-pod
spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.25.1
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--v"
- "3"
resources:
requests:
cpu: "500m"
memory: "256Mi"
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap


@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../base
- job.yaml


@@ -1,40 +0,0 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: descheduler-cluster-role
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: descheduler-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: descheduler-cluster-role-binding
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: descheduler-cluster-role
subjects:
- name: descheduler-sa
kind: ServiceAccount
namespace: kube-system

metrics/metrics.go Normal file

@@ -0,0 +1,72 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"sigs.k8s.io/descheduler/pkg/version"
)
const (
// DeschedulerSubsystem - subsystem name used by descheduler
DeschedulerSubsystem = "descheduler"
)
var (
PodsEvicted = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: DeschedulerSubsystem,
Name: "pods_evicted",
Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
StabilityLevel: metrics.ALPHA,
}, []string{"result", "strategy", "namespace", "node"})
buildInfo = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: DeschedulerSubsystem,
Name: "build_info",
Help: "Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch",
ConstLabels: map[string]string{"GoVersion": version.Get().GoVersion, "AppVersion": version.Get().Major + "." + version.Get().Minor, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1},
StabilityLevel: metrics.ALPHA,
},
)
metricsList = []metrics.Registerable{
PodsEvicted,
buildInfo,
}
)
var registerMetrics sync.Once
// Register all metrics.
func Register() {
// Register the metrics.
registerMetrics.Do(func() {
RegisterMetrics(metricsList...)
})
}
// RegisterMetrics registers a list of metrics.
func RegisterMetrics(extraMetrics ...metrics.Registerable) {
for _, metric := range extraMetrics {
legacyregistry.MustRegister(metric)
}
}


@@ -19,8 +19,6 @@ package api
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
)
var (
@@ -35,12 +33,6 @@ const GroupName = "descheduler"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
func init() {
if err := addKnownTypes(scheme.Scheme); err != nil {
panic(err)
}
}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()


@@ -17,7 +17,7 @@ limitations under the License.
package api
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -28,6 +28,27 @@ type DeschedulerPolicy struct {
// Strategies
Strategies StrategyList
// NodeSelector for a set of nodes to operate over
NodeSelector *string
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
EvictSystemCriticalPods *bool
// IgnorePVCPods prevents pods with PVCs from being evicted.
IgnorePVCPods *bool
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *uint
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint
}
type StrategyName string
@@ -41,20 +62,72 @@ type DeschedulerStrategy struct {
Weight int
// Strategy parameters
Params StrategyParameters
Params *StrategyParameters
}
// Only one of its members may be specified
// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable
type Namespaces struct {
Include []string
Exclude []string
}
// Besides Namespaces only one of its members may be specified
// TODO(jchaloup): move Namespaces ThresholdPriority and ThresholdPriorityClassName to individual strategies
//
// once the policy version is bumped to v1alpha2
type StrategyParameters struct {
NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds
NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds
NodeAffinityType []string
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts
PodLifeTime *PodLifeTime
RemoveDuplicates *RemoveDuplicates
FailedPods *FailedPods
IncludeSoftConstraints bool
Namespaces *Namespaces
ThresholdPriority *int32
ThresholdPriorityClassName string
LabelSelector *metav1.LabelSelector
NodeFit bool
IncludePreferNoSchedule bool
ExcludedTaints []string
}
type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage
type NodeResourceUtilizationThresholds struct {
Thresholds ResourceThresholds
TargetThresholds ResourceThresholds
NumberOfNodes int
UseDeviationThresholds bool
Thresholds ResourceThresholds
TargetThresholds ResourceThresholds
NumberOfNodes int
}
type PodsHavingTooManyRestarts struct {
PodRestartThreshold int32
IncludingInitContainers bool
}
type RemoveDuplicates struct {
ExcludeOwnerKinds []string
}
type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint
States []string
// Deprecated: Use States instead.
PodStatusPhases []string
}
type FailedPods struct {
ExcludeOwnerKinds []string
MinPodLifetimeSeconds *uint
Reasons []string
IncludingInitContainers bool
}
type PriorityThreshold struct {
Value *int32
Name string
}


@@ -17,7 +17,7 @@ limitations under the License.
package v1alpha1
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -28,6 +28,27 @@ type DeschedulerPolicy struct {
// Strategies
Strategies StrategyList `json:"strategies,omitempty"`
// NodeSelector for a set of nodes to operate over
NodeSelector *string `json:"nodeSelector,omitempty"`
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`
// IgnorePVCPods prevents pods with PVCs from being evicted.
IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *int `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}
type StrategyName string
@@ -41,20 +62,64 @@ type DeschedulerStrategy struct {
Weight int `json:"weight,omitempty"`
// Strategy parameters
Params StrategyParameters `json:"params,omitempty"`
Params *StrategyParameters `json:"params,omitempty"`
}
// Only one of its members may be specified
// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable.
type Namespaces struct {
Include []string `json:"include"`
Exclude []string `json:"exclude"`
}
// Besides Namespaces ThresholdPriority and ThresholdPriorityClassName only one of its members may be specified
type StrategyParameters struct {
NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
FailedPods *FailedPods `json:"failedPods,omitempty"`
IncludeSoftConstraints bool `json:"includeSoftConstraints"`
Namespaces *Namespaces `json:"namespaces"`
ThresholdPriority *int32 `json:"thresholdPriority"`
ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
NodeFit bool `json:"nodeFit"`
IncludePreferNoSchedule bool `json:"includePreferNoSchedule"`
ExcludedTaints []string `json:"excludedTaints,omitempty"`
}
type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage
type NodeResourceUtilizationThresholds struct {
UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
Thresholds ResourceThresholds `json:"thresholds,omitempty"`
TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
NumberOfNodes int `json:"numberOfNodes,omitempty"`
}
type PodsHavingTooManyRestarts struct {
PodRestartThreshold int32 `json:"podRestartThreshold,omitempty"`
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
}
type RemoveDuplicates struct {
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
}
type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
States []string `json:"states,omitempty"`
// Deprecated: Use States instead.
PodStatusPhases []string `json:"podStatusPhases,omitempty"`
}
type FailedPods struct {
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds,omitempty"`
Reasons []string `json:"reasons,omitempty"`
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
}
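A short hedged sketch (illustrative, not taken from the diff) of what the switch to a pointer Params buys in v1alpha1: a strategy can now omit its parameters entirely. The import path and the example owner kind are assumptions.

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1" // assumed import path for the types above
)

func main() {
	// Params is now *StrategyParameters, so it can simply be left nil.
	bare := v1alpha1.DeschedulerStrategy{Enabled: true}

	withParams := v1alpha1.DeschedulerStrategy{
		Enabled: true,
		Params: &v1alpha1.StrategyParameters{
			RemoveDuplicates: &v1alpha1.RemoveDuplicates{
				ExcludeOwnerKinds: []string{"ReplicaSet"}, // illustrative value
			},
		},
	}
	fmt.Println(bare.Params == nil, withParams.Params != nil) // true true
}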

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -23,6 +24,7 @@ package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
@@ -55,6 +57,26 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*FailedPods)(nil), (*api.FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_FailedPods_To_api_FailedPods(a.(*FailedPods), b.(*api.FailedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.FailedPods)(nil), (*FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_FailedPods_To_v1alpha1_FailedPods(a.(*api.FailedPods), b.(*FailedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.Namespaces)(nil), (*Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_Namespaces_To_v1alpha1_Namespaces(a.(*api.Namespaces), b.(*Namespaces), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*NodeResourceUtilizationThresholds)(nil), (*api.NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(a.(*NodeResourceUtilizationThresholds), b.(*api.NodeResourceUtilizationThresholds), scope)
}); err != nil {
@@ -65,6 +87,36 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*PodLifeTime)(nil), (*api.PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(a.(*PodLifeTime), b.(*api.PodLifeTime), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.PodLifeTime)(nil), (*PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(a.(*api.PodLifeTime), b.(*PodLifeTime), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*PodsHavingTooManyRestarts)(nil), (*api.PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(a.(*PodsHavingTooManyRestarts), b.(*api.PodsHavingTooManyRestarts), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.PodsHavingTooManyRestarts)(nil), (*PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(a.(*api.PodsHavingTooManyRestarts), b.(*PodsHavingTooManyRestarts), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*RemoveDuplicates)(nil), (*api.RemoveDuplicates)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(a.(*RemoveDuplicates), b.(*api.RemoveDuplicates), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.RemoveDuplicates)(nil), (*RemoveDuplicates)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(a.(*api.RemoveDuplicates), b.(*RemoveDuplicates), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*StrategyParameters)(nil), (*api.StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(a.(*StrategyParameters), b.(*api.StrategyParameters), scope)
}); err != nil {
@@ -80,6 +132,25 @@ func RegisterConversions(s *runtime.Scheme) error {
func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
return nil
}
@@ -90,6 +161,25 @@ func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Descheduler
func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
return nil
}
@@ -101,9 +191,7 @@ func Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.Desched
func autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Weight = in.Weight
out.Params = (*api.StrategyParameters)(unsafe.Pointer(in.Params))
return nil
}
@@ -115,9 +203,7 @@ func Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *Desched
func autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Weight = in.Weight
out.Params = (*StrategyParameters)(unsafe.Pointer(in.Params))
return nil
}
@@ -126,7 +212,56 @@ func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.Des
return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
}
func autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_v1alpha1_FailedPods_To_api_FailedPods is an autogenerated conversion function.
func Convert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
return autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in, out, s)
}
func autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_api_FailedPods_To_v1alpha1_FailedPods is an autogenerated conversion function.
func Convert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
return autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in, out, s)
}
func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
return nil
}
// Convert_v1alpha1_Namespaces_To_api_Namespaces is an autogenerated conversion function.
func Convert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
return autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in, out, s)
}
func autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
return nil
}
// Convert_api_Namespaces_To_v1alpha1_Namespaces is an autogenerated conversion function.
func Convert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
return autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in, out, s)
}
func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
out.UseDeviationThresholds = in.UseDeviationThresholds
out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
out.NumberOfNodes = in.NumberOfNodes
@@ -139,6 +274,7 @@ func Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtili
}
func autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
out.UseDeviationThresholds = in.UseDeviationThresholds
out.Thresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
out.TargetThresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
out.NumberOfNodes = in.NumberOfNodes
@@ -150,11 +286,87 @@ func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtili
return autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in, out, s)
}
func autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
out.States = *(*[]string)(unsafe.Pointer(&in.States))
out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
return nil
}
// Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime is an autogenerated conversion function.
func Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
return autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in, out, s)
}
func autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
out.States = *(*[]string)(unsafe.Pointer(&in.States))
out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
return nil
}
// Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime is an autogenerated conversion function.
func Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
return autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in, out, s)
}
func autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
out.PodRestartThreshold = in.PodRestartThreshold
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts is an autogenerated conversion function.
func Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
return autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in, out, s)
}
func autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
out.PodRestartThreshold = in.PodRestartThreshold
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts is an autogenerated conversion function.
func Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
return autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in, out, s)
}
func autoConvert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in *RemoveDuplicates, out *api.RemoveDuplicates, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
return nil
}
// Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates is an autogenerated conversion function.
func Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in *RemoveDuplicates, out *api.RemoveDuplicates, s conversion.Scope) error {
return autoConvert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in, out, s)
}
func autoConvert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in *api.RemoveDuplicates, out *RemoveDuplicates, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
return nil
}
// Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates is an autogenerated conversion function.
func Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in *api.RemoveDuplicates, out *RemoveDuplicates, s conversion.Scope) error {
return autoConvert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in, out, s)
}
func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
out.NodeResourceUtilizationThresholds = (*api.NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
out.FailedPods = (*api.FailedPods)(unsafe.Pointer(in.FailedPods))
out.IncludeSoftConstraints = in.IncludeSoftConstraints
out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.NodeFit = in.NodeFit
out.IncludePreferNoSchedule = in.IncludePreferNoSchedule
out.ExcludedTaints = *(*[]string)(unsafe.Pointer(&in.ExcludedTaints))
return nil
}
@@ -164,10 +376,20 @@ func Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyP
}
func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
out.NodeResourceUtilizationThresholds = (*NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
out.FailedPods = (*FailedPods)(unsafe.Pointer(in.FailedPods))
out.IncludeSoftConstraints = in.IncludeSoftConstraints
out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.NodeFit = in.NodeFit
out.IncludePreferNoSchedule = in.IncludePreferNoSchedule
out.ExcludedTaints = *(*[]string)(unsafe.Pointer(&in.ExcludedTaints))
return nil
}
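As a rough usage sketch (an assumption, not code from this change), the generated converters can also be called directly; the bodies above do not use the conversion.Scope argument, so nil is passed here.

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/api/v1alpha1" // assumed import path
)

func main() {
	in := &v1alpha1.StrategyParameters{NodeFit: true}
	out := &api.StrategyParameters{}
	if err := v1alpha1.Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.NodeFit) // true
}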

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,6 +22,7 @@ limitations under the License.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -35,6 +37,41 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
(*out)[key] = *val.DeepCopy()
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(string)
**out = **in
}
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool)
**out = **in
}
if in.EvictSystemCriticalPods != nil {
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
*out = new(bool)
**out = **in
}
if in.IgnorePVCPods != nil {
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
*out = new(bool)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = **in
}
return
}
@@ -59,7 +96,11 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
*out = *in
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = new(StrategyParameters)
(*in).DeepCopyInto(*out)
}
return
}
@@ -73,6 +114,63 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Exclude != nil {
in, out := &in.Exclude, &out.Exclude
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
func (in *Namespaces) DeepCopy() *Namespaces {
if in == nil {
return nil
}
out := new(Namespaces)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
*out = *in
@@ -103,6 +201,74 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
*out = *in
if in.MaxPodLifeTimeSeconds != nil {
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
*out = new(uint)
**out = **in
}
if in.States != nil {
in, out := &in.States, &out.States
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodStatusPhases != nil {
in, out := &in.PodStatusPhases, &out.PodStatusPhases
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
if in == nil {
return nil
}
out := new(PodLifeTime)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
if in == nil {
return nil
}
out := new(PodsHavingTooManyRestarts)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
if in == nil {
return nil
}
out := new(RemoveDuplicates)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
{
@@ -150,12 +316,56 @@ func (in StrategyList) DeepCopy() StrategyList {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = *in
if in.NodeResourceUtilizationThresholds != nil {
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
*out = new(NodeResourceUtilizationThresholds)
(*in).DeepCopyInto(*out)
}
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodsHavingTooManyRestarts != nil {
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
*out = new(PodsHavingTooManyRestarts)
**out = **in
}
if in.PodLifeTime != nil {
in, out := &in.PodLifeTime, &out.PodLifeTime
*out = new(PodLifeTime)
(*in).DeepCopyInto(*out)
}
if in.RemoveDuplicates != nil {
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
*out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out)
}
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces)
(*in).DeepCopyInto(*out)
}
if in.ThresholdPriority != nil {
in, out := &in.ThresholdPriority, &out.ThresholdPriority
*out = new(int32)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludedTaints != nil {
in, out := &in.ExcludedTaints, &out.ExcludedTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
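A quick hedged sketch of what the pointer-aware deepcopy above means in practice: slices and nested structs are copied, so mutating the clone leaves the original intact. The import path and taint value are illustrative assumptions.

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1" // assumed import path
)

func main() {
	orig := &v1alpha1.StrategyParameters{
		ExcludedTaints: []string{"node.kubernetes.io/unreachable"},
	}
	clone := orig.DeepCopy() // deepcopy-gen emits DeepCopy alongside DeepCopyInto
	clone.ExcludedTaints[0] = "changed"
	fmt.Println(orig.ExcludedTaints[0]) // still node.kubernetes.io/unreachable
}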

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,6 +22,7 @@ limitations under the License.
package api
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -35,6 +37,41 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
(*out)[key] = *val.DeepCopy()
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(string)
**out = **in
}
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool)
**out = **in
}
if in.EvictSystemCriticalPods != nil {
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
*out = new(bool)
**out = **in
}
if in.IgnorePVCPods != nil {
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
*out = new(bool)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = **in
}
return
}
@@ -59,7 +96,11 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
*out = *in
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = new(StrategyParameters)
(*in).DeepCopyInto(*out)
}
return
}
@@ -73,6 +114,63 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Exclude != nil {
in, out := &in.Exclude, &out.Exclude
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
func (in *Namespaces) DeepCopy() *Namespaces {
if in == nil {
return nil
}
out := new(Namespaces)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
*out = *in
@@ -103,6 +201,95 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
*out = *in
if in.MaxPodLifeTimeSeconds != nil {
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
*out = new(uint)
**out = **in
}
if in.States != nil {
in, out := &in.States, &out.States
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodStatusPhases != nil {
in, out := &in.PodStatusPhases, &out.PodStatusPhases
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
if in == nil {
return nil
}
out := new(PodLifeTime)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
if in == nil {
return nil
}
out := new(PodsHavingTooManyRestarts)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityThreshold) DeepCopyInto(out *PriorityThreshold) {
*out = *in
if in.Value != nil {
in, out := &in.Value, &out.Value
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityThreshold.
func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
if in == nil {
return nil
}
out := new(PriorityThreshold)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
if in == nil {
return nil
}
out := new(RemoveDuplicates)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
{
@@ -150,12 +337,56 @@ func (in StrategyList) DeepCopy() StrategyList {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = *in
if in.NodeResourceUtilizationThresholds != nil {
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
*out = new(NodeResourceUtilizationThresholds)
(*in).DeepCopyInto(*out)
}
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodsHavingTooManyRestarts != nil {
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
*out = new(PodsHavingTooManyRestarts)
**out = **in
}
if in.PodLifeTime != nil {
in, out := &in.PodLifeTime, &out.PodLifeTime
*out = new(PodLifeTime)
(*in).DeepCopyInto(*out)
}
if in.RemoveDuplicates != nil {
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
*out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out)
}
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces)
(*in).DeepCopyInto(*out)
}
if in.ThresholdPriority != nil {
in, out := &in.ThresholdPriority, &out.ThresholdPriority
*out = new(int32)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludedTaints != nil {
in, out := &in.ExcludedTaints, &out.ExcludedTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@@ -19,8 +19,6 @@ package componentconfig
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
)
var (
@@ -34,12 +32,6 @@ const GroupName = "deschedulercomponentconfig"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
func init() {
if err := addKnownTypes(scheme.Scheme); err != nil {
panic(err)
}
}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()

View File

@@ -20,6 +20,8 @@ import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
registry "k8s.io/component-base/logs/api/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -48,4 +50,14 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration
// Logging specifies the options of logging.
// Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
Logging registry.LoggingConfiguration
}
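A minimal sketch, assuming the usual sigs.k8s.io/descheduler/pkg/apis/componentconfig import path, of how the new LeaderElection and Logging fields might be set; the values are illustrative only.

package main

import (
	"fmt"

	componentbaseconfig "k8s.io/component-base/config"
	registry "k8s.io/component-base/logs/api/v1"

	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)

func main() {
	cfg := componentconfig.DeschedulerConfiguration{
		EvictLocalStoragePods: true,
		LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
			LeaderElect: true,
		},
		Logging: registry.LoggingConfiguration{
			Format: "json", // illustrative; component-base defaults to "text"
		},
	}
	fmt.Printf("leader election enabled: %v\n", cfg.LeaderElection.LeaderElect)
}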

View File

@@ -0,0 +1,133 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package componentconfig
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsViolatingNodeTaintsArgs holds arguments used to configure the RemovePodsViolatingNodeTaints plugin.
type RemovePodsViolatingNodeTaintsArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
IncludePreferNoSchedule bool
ExcludedTaints []string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemoveFailedPodsArgs holds arguments used to configure RemoveFailedPods plugin.
type RemoveFailedPodsArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
ExcludeOwnerKinds []string
MinPodLifetimeSeconds *uint
Reasons []string
IncludingInitContainers bool
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsViolatingNodeAffinityArgs holds arguments used to configure RemovePodsViolatingNodeAffinity plugin.
type RemovePodsViolatingNodeAffinityArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
NodeAffinityType []string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsHavingTooManyRestartsArgs holds arguments used to configure RemovePodsHavingTooManyRestarts plugin.
type RemovePodsHavingTooManyRestartsArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
PodRestartThreshold int32
IncludingInitContainers bool
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type RemoveDuplicatesArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
ExcludeOwnerKinds []string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodLifeTimeArgs holds arguments used to configure PodLifeTime plugin.
type PodLifeTimeArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
MaxPodLifeTimeSeconds *uint
States []string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsViolatingTopologySpreadConstraintArgs holds arguments used to configure RemovePodsViolatingTopologySpreadConstraint plugin.
type RemovePodsViolatingTopologySpreadConstraintArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
IncludeSoftConstraints bool
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsViolatingInterPodAntiAffinityArgs holds arguments used to configure the RemovePodsViolatingInterPodAntiAffinity plugin.
type RemovePodsViolatingInterPodAntiAffinityArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type LowNodeUtilizationArgs struct {
metav1.TypeMeta
UseDeviationThresholds bool
Thresholds api.ResourceThresholds
TargetThresholds api.ResourceThresholds
NumberOfNodes int
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type HighNodeUtilizationArgs struct {
metav1.TypeMeta
Thresholds api.ResourceThresholds
NumberOfNodes int
}
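To show how these per-plugin args tie in with the validation added later in this change, a hedged sketch follows; the validation package path is an assumption and the values are illustrative.

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation" // assumed path of the validation file below
)

func main() {
	maxLifetime := uint(3600) // illustrative: one hour
	args := &componentconfig.PodLifeTimeArgs{
		MaxPodLifeTimeSeconds: &maxLifetime,
		States:                []string{"Running"},
		Namespaces: &api.Namespaces{
			Include: []string{"default"}, // Include and Exclude are mutually exclusive
		},
	}
	fmt.Println(validation.ValidatePodLifeTimeArgs(args)) // <nil> for valid args
}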

View File

@@ -20,6 +20,8 @@ import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
registry "k8s.io/component-base/logs/api/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -48,4 +50,14 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
// Logging specifies the options of logging.
// Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
Logging registry.LoggingConfiguration `json:"logging,omitempty"`
}

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -56,6 +57,9 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.IgnorePVCPods = in.IgnorePVCPods
out.LeaderElection = in.LeaderElection
out.Logging = in.Logging
return nil
}
@@ -72,6 +76,9 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.IgnorePVCPods = in.IgnorePVCPods
out.LeaderElection = in.LeaderElection
out.Logging = in.Logging
return nil
}

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -28,6 +29,8 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.LeaderElection = in.LeaderElection
in.Logging.DeepCopyInto(&out.Logging)
return
}

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -0,0 +1,203 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
v1 "k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)
const (
// MinResourcePercentage is the minimum value of a resource's percentage
MinResourcePercentage = 0
// MaxResourcePercentage is the maximum value of a resource's percentage
MaxResourcePercentage = 100
)
// ValidateRemovePodsViolatingNodeTaintsArgs validates RemovePodsViolatingNodeTaints arguments
func ValidateRemovePodsViolatingNodeTaintsArgs(args *componentconfig.RemovePodsViolatingNodeTaintsArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// ValidateRemoveFailedPodsArgs validates RemoveFailedPods arguments
func ValidateRemoveFailedPodsArgs(args *componentconfig.RemoveFailedPodsArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// ValidateRemovePodsHavingTooManyRestartsArgs validates RemovePodsHavingTooManyRestarts arguments
func ValidateRemovePodsHavingTooManyRestartsArgs(args *componentconfig.RemovePodsHavingTooManyRestartsArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
validatePodRestartThreshold(args.PodRestartThreshold),
)
}
// ValidateRemovePodsViolatingNodeAffinityArgs validates RemovePodsViolatingNodeAffinity arguments
func ValidateRemovePodsViolatingNodeAffinityArgs(args *componentconfig.RemovePodsViolatingNodeAffinityArgs) error {
var err error
if args == nil || len(args.NodeAffinityType) == 0 {
err = fmt.Errorf("nodeAffinityType needs to be set")
}
return errorsAggregate(
err,
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// ValidatePodLifeTimeArgs validates PodLifeTime arguments
func ValidatePodLifeTimeArgs(args *componentconfig.PodLifeTimeArgs) error {
var err error
if args.MaxPodLifeTimeSeconds == nil {
err = fmt.Errorf("MaxPodLifeTimeSeconds not set")
}
return errorsAggregate(
err,
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
validatePodLifeTimeStates(args.States),
)
}
func ValidateRemoveDuplicatesArgs(args *componentconfig.RemoveDuplicatesArgs) error {
return validateNamespaceArgs(args.Namespaces)
}
// ValidateRemovePodsViolatingTopologySpreadConstraintArgs validates RemovePodsViolatingTopologySpreadConstraint arguments
func ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args *componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// ValidateRemovePodsViolatingInterPodAntiAffinityArgs validates ValidateRemovePodsViolatingInterPodAntiAffinity arguments
func ValidateRemovePodsViolatingInterPodAntiAffinityArgs(args *componentconfig.RemovePodsViolatingInterPodAntiAffinityArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// errorsAggregate converts all arg validation errors to a single error interface.
// if no errors, it will return nil.
func errorsAggregate(errors ...error) error {
return utilerrors.NewAggregate(errors)
}
func validateNamespaceArgs(namespaces *api.Namespaces) error {
// At most one of include/exclude can be set
if namespaces != nil && len(namespaces.Include) > 0 && len(namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
return nil
}
func validateLabelSelectorArgs(labelSelector *metav1.LabelSelector) error {
if labelSelector != nil {
if _, err := metav1.LabelSelectorAsSelector(labelSelector); err != nil {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
}
}
return nil
}
func validatePodRestartThreshold(podRestartThreshold int32) error {
if podRestartThreshold < 1 {
return fmt.Errorf("PodsHavingTooManyRestarts threshold not set")
}
return nil
}
func validatePodLifeTimeStates(states []string) error {
podLifeTimeAllowedStates := sets.NewString(
string(v1.PodRunning),
string(v1.PodPending),
// Container state reasons: https://github.com/kubernetes/kubernetes/blob/release-1.24/pkg/kubelet/kubelet_pods.go#L76-L79
"PodInitializing",
"ContainerCreating",
)
if !podLifeTimeAllowedStates.HasAll(states...) {
return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.List())
}
return nil
}
func ValidateHighNodeUtilizationArgs(args *componentconfig.HighNodeUtilizationArgs) error {
return validateThresholds(args.Thresholds)
}
func ValidateLowNodeUtilizationArgs(args *componentconfig.LowNodeUtilizationArgs) error {
return validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, args.UseDeviationThresholds)
}
func validateLowNodeUtilizationThresholds(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) error {
// validate thresholds and targetThresholds config
if err := validateThresholds(thresholds); err != nil {
return fmt.Errorf("thresholds config is not valid: %v", err)
}
if err := validateThresholds(targetThresholds); err != nil {
return fmt.Errorf("targetThresholds config is not valid: %v", err)
}
// validate if thresholds and targetThresholds have same resources configured
if len(thresholds) != len(targetThresholds) {
return fmt.Errorf("thresholds and targetThresholds configured different resources")
}
for resourceName, value := range thresholds {
if targetValue, ok := targetThresholds[resourceName]; !ok {
return fmt.Errorf("thresholds and targetThresholds configured different resources")
} else if value > targetValue && !useDeviationThresholds {
return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
}
}
return nil
}
// validateThresholds checks if thresholds have valid resource name and resource percentage configured
func validateThresholds(thresholds api.ResourceThresholds) error {
if len(thresholds) == 0 {
return fmt.Errorf("no resource threshold is configured")
}
for name, percent := range thresholds {
if percent < MinResourcePercentage || percent > MaxResourcePercentage {
return fmt.Errorf("%v threshold not in [%v, %v] range", name, MinResourcePercentage, MaxResourcePercentage)
}
}
return nil
}
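A brief hedged usage sketch of the LowNodeUtilization threshold checks above (package path assumed, values illustrative):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation" // assumed import path
)

func main() {
	args := &componentconfig.LowNodeUtilizationArgs{
		Thresholds:       api.ResourceThresholds{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
		TargetThresholds: api.ResourceThresholds{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
	}
	fmt.Println(validation.ValidateLowNodeUtilizationArgs(args)) // <nil>: same resources, thresholds below targets

	args.TargetThresholds = api.ResourceThresholds{v1.ResourceCPU: 80}
	fmt.Println(validation.ValidateLowNodeUtilizationArgs(args)) // error: different resources configured
}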

View File

@@ -0,0 +1,331 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)
func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
testCases := []struct {
description string
args *componentconfig.RemovePodsViolatingNodeTaintsArgs
expectError bool
}{
{
description: "valid namespace args, no errors",
args: &componentconfig.RemovePodsViolatingNodeTaintsArgs{
Namespaces: &api.Namespaces{
Include: []string{"default"},
},
},
expectError: false,
},
{
description: "invalid namespaces args, expects error",
args: &componentconfig.RemovePodsViolatingNodeTaintsArgs{
Namespaces: &api.Namespaces{
Include: []string{"default"},
Exclude: []string{"kube-system"},
},
},
expectError: true,
},
{
description: "valid label selector args, no errors",
args: &componentconfig.RemovePodsViolatingNodeTaintsArgs{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
},
},
expectError: false,
},
{
description: "invalid label selector args, expects errors",
args: &componentconfig.RemovePodsViolatingNodeTaintsArgs{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Operator: metav1.LabelSelectorOpIn,
},
},
},
},
expectError: true,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemovePodsViolatingNodeTaintsArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
}
})
}
}
func TestValidateRemovePodsViolatingNodeAffinityArgs(t *testing.T) {
testCases := []struct {
description string
args *componentconfig.RemovePodsViolatingNodeAffinityArgs
expectError bool
}{
{
description: "nil NodeAffinityType args, expects errors",
args: &componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: nil,
},
expectError: true,
},
{
description: "empty NodeAffinityType args, expects errors",
args: &componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{},
},
expectError: true,
},
{
description: "valid NodeAffinityType args, no errors",
args: &componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
expectError: false,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemovePodsViolatingNodeAffinityArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
}
})
}
}
func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
testCases := []struct {
description string
args *componentconfig.PodLifeTimeArgs
expectError bool
}{
{
description: "valid arg, no errors",
args: &componentconfig.PodLifeTimeArgs{
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
States: []string{string(v1.PodRunning)},
},
expectError: false,
},
{
description: "nil MaxPodLifeTimeSeconds arg, expects errors",
args: &componentconfig.PodLifeTimeArgs{
MaxPodLifeTimeSeconds: nil,
},
expectError: true,
},
{
description: "invalid pod state arg, expects errors",
args: &componentconfig.PodLifeTimeArgs{
States: []string{string(v1.NodeRunning)},
},
expectError: true,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidatePodLifeTimeArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
}
})
}
}
func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
var extendedResource = v1.ResourceName("example.com/foo")
tests := []struct {
name string
thresholds api.ResourceThresholds
targetThresholds api.ResourceThresholds
errInfo error
}{
{
name: "passing invalid thresholds",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 120,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
},
{
name: "thresholds and targetThresholds configured different num of resources",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
v1.ResourcePods: 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "thresholds and targetThresholds configured different resources",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourcePods: 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "thresholds' CPU config value is greater than targetThresholds'",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 90,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
},
{
name: "only thresholds configured extended resource",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "only targetThresholds configured extended resource",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
extendedResource: 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "thresholds and targetThresholds configured different extended resources",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
"example.com/bar": 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "thresholds' extended resource config value is greater than targetThresholds'",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 90,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
extendedResource: 20,
},
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource),
},
{
name: "passing valid plugin config",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: nil,
},
{
name: "passing valid plugin config with extended resource",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
extendedResource: 80,
},
errInfo: nil,
},
}
for _, testCase := range tests {
args := &componentconfig.LowNodeUtilizationArgs{
Thresholds: testCase.thresholds,
TargetThresholds: testCase.targetThresholds,
}
validateErr := validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, false)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
}
}
}
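Taken together, these cases encode three rules: every percentage must lie within [MinResourcePercentage, MaxResourcePercentage], thresholds and targetThresholds must name the same set of resources, and no threshold may exceed its targetThresholds counterpart. A minimal sketch of a config that passes all three, reusing the unexported validateLowNodeUtilizationThresholds helper exercised above (so it only compiles inside this validation package; the boolean flag mirrors the false passed by these tests):
// Sketch only: assumes the same componentconfig, api, and v1 imports used by the tests above.
func sketchValidLowNodeUtilizationArgs() error {
	args := &componentconfig.LowNodeUtilizationArgs{
		Thresholds: api.ResourceThresholds{
			v1.ResourceCPU:    20,
			v1.ResourceMemory: 20,
		},
		TargetThresholds: api.ResourceThresholds{
			v1.ResourceCPU:    80,
			v1.ResourceMemory: 80,
		},
	}
	// Returns nil for a valid configuration, an error otherwise.
	return validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, false)
}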

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,13 +22,17 @@ limitations under the License.
package componentconfig
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.LeaderElection = in.LeaderElection
in.Logging.DeepCopyInto(&out.Logging)
return
}
@@ -48,3 +53,389 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HighNodeUtilizationArgs) DeepCopyInto(out *HighNodeUtilizationArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Thresholds != nil {
in, out := &in.Thresholds, &out.Thresholds
*out = make(api.ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighNodeUtilizationArgs.
func (in *HighNodeUtilizationArgs) DeepCopy() *HighNodeUtilizationArgs {
if in == nil {
return nil
}
out := new(HighNodeUtilizationArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HighNodeUtilizationArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LowNodeUtilizationArgs) DeepCopyInto(out *LowNodeUtilizationArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Thresholds != nil {
in, out := &in.Thresholds, &out.Thresholds
*out = make(api.ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.TargetThresholds != nil {
in, out := &in.TargetThresholds, &out.TargetThresholds
*out = make(api.ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LowNodeUtilizationArgs.
func (in *LowNodeUtilizationArgs) DeepCopy() *LowNodeUtilizationArgs {
if in == nil {
return nil
}
out := new(LowNodeUtilizationArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LowNodeUtilizationArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLifeTimeArgs) DeepCopyInto(out *PodLifeTimeArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.MaxPodLifeTimeSeconds != nil {
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
*out = new(uint)
**out = **in
}
if in.States != nil {
in, out := &in.States, &out.States
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTimeArgs.
func (in *PodLifeTimeArgs) DeepCopy() *PodLifeTimeArgs {
if in == nil {
return nil
}
out := new(PodLifeTimeArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodLifeTimeArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveDuplicatesArgs) DeepCopyInto(out *RemoveDuplicatesArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicatesArgs.
func (in *RemoveDuplicatesArgs) DeepCopy() *RemoveDuplicatesArgs {
if in == nil {
return nil
}
out := new(RemoveDuplicatesArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemoveDuplicatesArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveFailedPodsArgs) DeepCopyInto(out *RemoveFailedPodsArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveFailedPodsArgs.
func (in *RemoveFailedPodsArgs) DeepCopy() *RemoveFailedPodsArgs {
if in == nil {
return nil
}
out := new(RemoveFailedPodsArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemoveFailedPodsArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopyInto(out *RemovePodsHavingTooManyRestartsArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsHavingTooManyRestartsArgs.
func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopy() *RemovePodsHavingTooManyRestartsArgs {
if in == nil {
return nil
}
out := new(RemovePodsHavingTooManyRestartsArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingInterPodAntiAffinityArgs) DeepCopyInto(out *RemovePodsViolatingInterPodAntiAffinityArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingInterPodAntiAffinityArgs.
func (in *RemovePodsViolatingInterPodAntiAffinityArgs) DeepCopy() *RemovePodsViolatingInterPodAntiAffinityArgs {
if in == nil {
return nil
}
out := new(RemovePodsViolatingInterPodAntiAffinityArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsViolatingInterPodAntiAffinityArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingNodeAffinityArgs) DeepCopyInto(out *RemovePodsViolatingNodeAffinityArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingNodeAffinityArgs.
func (in *RemovePodsViolatingNodeAffinityArgs) DeepCopy() *RemovePodsViolatingNodeAffinityArgs {
if in == nil {
return nil
}
out := new(RemovePodsViolatingNodeAffinityArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsViolatingNodeAffinityArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingNodeTaintsArgs) DeepCopyInto(out *RemovePodsViolatingNodeTaintsArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludedTaints != nil {
in, out := &in.ExcludedTaints, &out.ExcludedTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingNodeTaintsArgs.
func (in *RemovePodsViolatingNodeTaintsArgs) DeepCopy() *RemovePodsViolatingNodeTaintsArgs {
if in == nil {
return nil
}
out := new(RemovePodsViolatingNodeTaintsArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsViolatingNodeTaintsArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyInto(out *RemovePodsViolatingTopologySpreadConstraintArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingTopologySpreadConstraintArgs.
func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopy() *RemovePodsViolatingTopologySpreadConstraintArgs {
if in == nil {
return nil
}
out := new(RemovePodsViolatingTopologySpreadConstraintArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

View File

@@ -26,7 +26,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
func CreateClient(kubeconfig string) (clientset.Interface, error) {
func CreateClient(kubeconfig string, userAgt string) (clientset.Interface, error) {
var cfg *rest.Config
if len(kubeconfig) != 0 {
master, err := GetMasterFromKubeconfig(kubeconfig)
@@ -47,7 +47,11 @@ func CreateClient(kubeconfig string) (clientset.Interface, error) {
}
}
return clientset.NewForConfig(cfg)
if len(userAgt) != 0 {
return clientset.NewForConfig(rest.AddUserAgent(cfg, userAgt))
} else {
return clientset.NewForConfig(cfg)
}
}
func GetMasterFromKubeconfig(filename string) (string, error) {

View File

@@ -17,26 +17,46 @@ limitations under the License.
package descheduler
import (
"context"
"fmt"
"k8s.io/klog"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
schedulingv1informers "k8s.io/client-go/informers/scheduling/v1"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/metrics"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/utils"
)
func Run(rs *options.DeschedulerServer) error {
rsclient, err := client.CreateClient(rs.KubeconfigFile)
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
metrics.Register()
rsclient, eventClient, err := createClients(rs.KubeconfigFile)
if err != nil {
return err
}
rs.Client = rsclient
rs.EventClient = eventClient
deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile)
if err != nil {
@@ -46,39 +66,384 @@ func Run(rs *options.DeschedulerServer) error {
return fmt.Errorf("deschedulerPolicy is nil")
}
return RunDeschedulerStrategies(rs, deschedulerPolicy)
}
func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy) error {
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
return err
}
stopChannel := make(chan struct{})
nodes, err := nodeutil.ReadyNodes(rs.Client, rs.NodeSelector, stopChannel)
if err != nil {
return err
runFn := func() error {
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
}
if len(nodes) <= 1 {
klog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
return fmt.Errorf("leaderElection must be used with deschedulingInterval")
}
if rs.LeaderElection.LeaderElect && !rs.DryRun {
if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
return fmt.Errorf("leaderElection: %w", err)
}
return nil
}
nodePodCount := utils.InitializeNodePodCount(nodes)
wait.Until(func() {
strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], evictionPolicyGroupVersion, nodes, nodePodCount)
return runFn()
}
type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter framework.EvictorPlugin, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
func cachedClient(
realClient clientset.Interface,
podInformer corev1informers.PodInformer,
nodeInformer corev1informers.NodeInformer,
namespaceInformer corev1informers.NamespaceInformer,
priorityClassInformer schedulingv1informers.PriorityClassInformer,
) (clientset.Interface, error) {
fakeClient := fakeclientset.NewSimpleClientset()
// simulate a pod eviction by deleting a pod
fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
eviction, matched := createAct.Object.(*policy.Eviction)
if !matched {
return false, nil, fmt.Errorf("unable to convert action object into *policy.Eviction")
}
if err := fakeClient.Tracker().Delete(action.GetResource(), eviction.GetNamespace(), eviction.GetName()); err != nil {
return false, nil, fmt.Errorf("unable to delete pod %v/%v: %v", eviction.GetNamespace(), eviction.GetName(), err)
}
return true, nil, nil
}
// fallback to the default reactor
return false, nil, nil
})
klog.V(3).Infof("Pulling resources for the cached client from the cluster")
pods, err := podInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list pods: %v", err)
}
for _, item := range pods {
if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy pod: %v", err)
}
}
nodes, err := nodeInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list nodes: %v", err)
}
for _, item := range nodes {
if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
namespaces, err := namespaceInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list namespaces: %v", err)
}
for _, item := range namespaces {
if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
priorityClasses, err := priorityClassInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
}
for _, item := range priorityClasses {
if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
}
}
return fakeClient, nil
}
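The reactor registered above makes the dry-run client treat an eviction as a deletion, so strategies running back to back against the same fake client see each other's simulated evictions. A rough sketch of that behavior (pod and namespace names are illustrative; the informers are assumed to come from a started shared informer factory, as in RunDeschedulerStrategies below):
// Sketch only: evicting through the cached fake client removes the pod from its tracker.
func sketchDryRunEviction(rs *options.DeschedulerServer,
	podInformer corev1informers.PodInformer,
	nodeInformer corev1informers.NodeInformer,
	namespaceInformer corev1informers.NamespaceInformer,
	priorityClassInformer schedulingv1informers.PriorityClassInformer,
) error {
	fakeClient, err := cachedClient(rs.Client, podInformer, nodeInformer, namespaceInformer, priorityClassInformer)
	if err != nil {
		return err
	}
	eviction := &policy.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pod", Namespace: "default"}, // illustrative names
	}
	if err := fakeClient.PolicyV1().Evictions("default").Evict(context.TODO(), eviction); err != nil {
		return err
	}
	// Any strategy that lists pods through fakeClient afterwards no longer sees "example-pod".
	return nil
}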
// evictorImpl implements the Evictor interface so plugins
// can evict a pod without importing a specific pod evictor
type evictorImpl struct {
podEvictor *evictions.PodEvictor
evictorFilter framework.EvictorPlugin
}
var _ framework.Evictor = &evictorImpl{}
// Filter checks if a pod can be evicted
func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
return ei.evictorFilter.Filter(pod)
}
// Evict evicts a pod (no pre-check performed)
func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
return ei.podEvictor.EvictPod(ctx, pod, opts)
}
func (ei *evictorImpl) NodeLimitExceeded(node *v1.Node) bool {
return ei.podEvictor.NodeLimitExceeded(node)
}
// handleImpl implements the framework handle which gets passed to plugins
type handleImpl struct {
clientSet clientset.Interface
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
evictor *evictorImpl
}
var _ framework.Handle = &handleImpl{}
// ClientSet retrieves kube client set
func (hi *handleImpl) ClientSet() clientset.Interface {
return hi.clientSet
}
// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
return hi.getPodsAssignedToNodeFunc
}
// SharedInformerFactory retrieves shared informer factory
func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
return hi.sharedInformerFactory
}
// Evictor retrieves evictor so plugins can filter and evict pods
func (hi *handleImpl) Evictor() framework.Evictor {
return hi.evictor
}
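With this handle in place, a strategy plugin never reaches for the PodEvictor directly; it filters and evicts through handle.Evictor(). A rough sketch of the call pattern (the helper name and eviction reason are illustrative; it assumes framework.Evictor exposes the Filter, Evict, and NodeLimitExceeded methods that evictorImpl implements above):
// Illustrative helper showing how a plugin built with this handle would evict pods.
func evictCandidates(ctx context.Context, handle framework.Handle, node *v1.Node, pods []*v1.Pod) {
	for _, pod := range pods {
		if handle.Evictor().NodeLimitExceeded(node) {
			break // per-node eviction budget reached
		}
		if !handle.Evictor().Filter(pod) {
			continue // the evictor plugin (DefaultEvictor here) vetoed this pod
		}
		handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{Reason: "illustrative reason"})
	}
}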
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
podInformer := sharedInformerFactory.Core().V1().Pods()
namespaceInformer := sharedInformerFactory.Core().V1().Namespaces()
priorityClassInformer := sharedInformerFactory.Scheduling().V1().PriorityClasses()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// create the informers
namespaceInformer.Informer()
priorityClassInformer.Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
strategyFuncs := map[api.StrategyName]strategyFunction{
"RemoveDuplicates": nil,
"LowNodeUtilization": nil,
"HighNodeUtilization": nil,
"RemovePodsViolatingInterPodAntiAffinity": nil,
"RemovePodsViolatingNodeAffinity": nil,
"RemovePodsViolatingNodeTaints": nil,
"RemovePodsHavingTooManyRestarts": nil,
"PodLifeTime": nil,
"RemovePodsViolatingTopologySpreadConstraint": nil,
"RemoveFailedPods": nil,
}
var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {
nodeSelector = *deschedulerPolicy.NodeSelector
}
var evictLocalStoragePods bool
if deschedulerPolicy.EvictLocalStoragePods != nil {
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
}
evictBarePods := false
if deschedulerPolicy.EvictFailedBarePods != nil {
evictBarePods = *deschedulerPolicy.EvictFailedBarePods
if evictBarePods {
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
}
}
evictSystemCriticalPods := false
if deschedulerPolicy.EvictSystemCriticalPods != nil {
evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
if evictSystemCriticalPods {
klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
}
}
ignorePvcPods := false
if deschedulerPolicy.IgnorePVCPods != nil {
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
}
var eventClient clientset.Interface
if rs.DryRun {
eventClient = fakeclientset.NewSimpleClientset()
} else {
eventClient = rs.Client
}
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
defer eventBroadcaster.Shutdown()
wait.NonSlidingUntil(func() {
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
if err != nil {
klog.V(1).InfoS("Unable to get ready nodes", "err", err)
cancel()
return
}
if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
cancel()
return
}
var podEvictorClient clientset.Interface
// When dry run mode is enabled, collect all the relevant objects (mostly pods) under a fake client,
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as evicting pods for real.
if rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(rs.Client, podInformer, nodeInformer, namespaceInformer, priorityClassInformer)
if err != nil {
klog.Error(err)
return
}
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods())
if err != nil {
klog.Errorf("build get pods assigned to node function error: %v", err)
return
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
podEvictorClient = fakeClient
} else {
podEvictorClient = rs.Client
}
klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor(
podEvictorClient,
evictionPolicyGroupVersion,
rs.DryRun,
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
!rs.DisableMetrics,
eventRecorder,
)
for name, strategy := range deschedulerPolicy.Strategies {
if f, ok := strategyFuncs[name]; ok {
if strategy.Enabled {
params := strategy.Params
if params == nil {
params = &api.StrategyParameters{}
}
nodeFit := false
if name != "PodLifeTime" {
nodeFit = params.NodeFit
}
// TODO(jchaloup): once all strategies are migrated move this check under
// the default evictor args validation
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
klog.V(1).ErrorS(fmt.Errorf("priority threshold misconfigured"), "only one of priorityThreshold fields can be set", "pluginName", name)
continue
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, rs.Client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
continue
}
defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
EvictLocalStoragePods: evictLocalStoragePods,
EvictSystemCriticalPods: evictSystemCriticalPods,
IgnorePvcPods: ignorePvcPods,
EvictFailedBarePods: evictBarePods,
NodeFit: nodeFit,
PriorityThreshold: &api.PriorityThreshold{
Value: &thresholdPriority,
},
}
evictorFilter, _ := defaultevictor.New(
defaultevictorArgs,
&handleImpl{
clientSet: rs.Client,
getPodsAssignedToNodeFunc: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
},
)
handle := &handleImpl{
clientSet: rs.Client,
getPodsAssignedToNodeFunc: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
evictor: &evictorImpl{
podEvictor: podEvictor,
evictorFilter: evictorFilter.(framework.EvictorPlugin),
},
}
// TODO: strategyName should be accessible from within the strategy using a framework
// handle or function which the Evictor has access to. For migration/in-progress framework
// work, we are currently passing this via context. To be removed
// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
childCtx := context.WithValue(ctx, "strategyName", string(name))
if pgFnc, exists := pluginsMap[string(name)]; exists {
pgFnc(childCtx, nodes, params, handle)
} else {
f(childCtx, rs.Client, strategy, nodes, podEvictor, evictorFilter.(framework.EvictorPlugin), getPodsAssignedToNode)
}
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
}
}
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
if rs.DeschedulingInterval.Seconds() == 0 {
close(stopChannel)
cancel()
}
}, rs.DeschedulingInterval, stopChannel)
}, rs.DeschedulingInterval, ctx.Done())
return nil
}
func createClients(kubeconfig string) (clientset.Interface, clientset.Interface, error) {
kClient, err := client.CreateClient(kubeconfig, "descheduler")
if err != nil {
return nil, nil, err
}
eventClient, err := client.CreateClient(kubeconfig, "")
if err != nil {
return nil, nil, err
}
return kClient, eventClient, nil
}

View File

@@ -0,0 +1,218 @@
package descheduler
import (
"context"
"fmt"
"testing"
"time"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/test"
)
func TestTaintsUpdated(t *testing.T) {
ctx := context.Background()
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
{},
}
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
dp := &api.DeschedulerPolicy{
Strategies: api.StrategyList{
"RemovePodsViolatingNodeTaints": api.DeschedulerStrategy{
Enabled: true,
},
},
}
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Unable to list pods: %v", err)
}
if len(pods.Items) < 1 {
t.Errorf("The pod was evicted before a node was tained")
}
n1WithTaint := n1.DeepCopy()
n1WithTaint.Spec.Taints = []v1.Taint{
{
Key: "key",
Value: "value",
Effect: v1.TaintEffectNoSchedule,
},
}
if _, err := client.CoreV1().Nodes().Update(ctx, n1WithTaint, metav1.UpdateOptions{}); err != nil {
t.Fatalf("Unable to update node: %v\n", err)
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
if err := RunDeschedulerStrategies(ctx, rs, dp, "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
if len(evictedPods) != 1 {
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
}
}
func TestDuplicate(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p1.Namespace = "dev"
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p2.Namespace = "dev"
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p3.Namespace = "dev"
ownerRef1 := test.GetReplicaSetOwnerRefList()
p1.ObjectMeta.OwnerReferences = ownerRef1
p2.ObjectMeta.OwnerReferences = ownerRef1
p3.ObjectMeta.OwnerReferences = ownerRef1
client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
dp := &api.DeschedulerPolicy{
Strategies: api.StrategyList{
"RemoveDuplicates": api.DeschedulerStrategy{
Enabled: true,
},
},
}
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Unable to list pods: %v", err)
}
if len(pods.Items) != 3 {
t.Errorf("Pods number should be 3 before evict")
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
if err := RunDeschedulerStrategies(ctx, rs, dp, "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
if len(evictedPods) == 0 {
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
}
}
func TestRootCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
client := fakeclientset.NewSimpleClientset(n1, n2)
eventClient := fakeclientset.NewSimpleClientset(n1, n2)
dp := &api.DeschedulerPolicy{
Strategies: api.StrategyList{}, // no strategies needed for this test
}
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 100 * time.Millisecond
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
errChan <- err
}()
cancel()
select {
case err := <-errChan:
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
case <-time.After(1 * time.Second):
t.Fatal("Root ctx should have canceled immediately")
}
}
func TestRootCancelWithNoInterval(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
client := fakeclientset.NewSimpleClientset(n1, n2)
eventClient := fakeclientset.NewSimpleClientset(n1, n2)
dp := &api.DeschedulerPolicy{
Strategies: api.StrategyList{}, // no strategies needed for this test
}
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 0
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
errChan <- err
}()
cancel()
select {
case err := <-errChan:
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
case <-time.After(1 * time.Second):
t.Fatal("Root ctx should have canceled immediately")
}
}
func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
*evictedPods = append(*evictedPods, eviction.GetName())
}
}
return false, nil, nil // fallback to the default reactor
}
}

View File

@@ -17,25 +17,160 @@ limitations under the License.
package evictions
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"k8s.io/client-go/tools/events"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/metrics"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
)
func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
if dryRun {
return true, nil
// nodePodEvictedCount keeps count of pods evicted on node
type nodePodEvictedCount map[string]uint
type namespacePodEvictCount map[string]uint
type PodEvictor struct {
client clientset.Interface
nodes []*v1.Node
policyGroupVersion string
dryRun bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
nodepodCount nodePodEvictedCount
namespacePodCount namespacePodEvictCount
metricsEnabled bool
eventRecorder events.EventRecorder
}
func NewPodEvictor(
client clientset.Interface,
policyGroupVersion string,
dryRun bool,
maxPodsToEvictPerNode *uint,
maxPodsToEvictPerNamespace *uint,
nodes []*v1.Node,
metricsEnabled bool,
eventRecorder events.EventRecorder,
) *PodEvictor {
var nodePodCount = make(nodePodEvictedCount)
var namespacePodCount = make(namespacePodEvictCount)
for _, node := range nodes {
// Initialize the evicted-pod count for each node to 0.
nodePodCount[node.Name] = 0
}
return &PodEvictor{
client: client,
nodes: nodes,
policyGroupVersion: policyGroupVersion,
dryRun: dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
nodepodCount: nodePodCount,
namespacePodCount: namespacePodCount,
metricsEnabled: metricsEnabled,
eventRecorder: eventRecorder,
}
}
// NodeEvicted returns the number of pods evicted from the given node
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
return pe.nodepodCount[node.Name]
}
// TotalEvicted returns the total number of pods evicted across all nodes
func (pe *PodEvictor) TotalEvicted() uint {
var total uint
for _, count := range pe.nodepodCount {
total += count
}
return total
}
// NodeLimitExceeded checks if the number of evictions for a node was exceeded
func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
if pe.maxPodsToEvictPerNode != nil {
return pe.nodepodCount[node.Name] == *pe.maxPodsToEvictPerNode
}
return false
}
// EvictOptions provides a handle for passing additional info to EvictPod
type EvictOptions struct {
// Reason allows for passing details about the specific eviction for logging.
Reason string
}
// EvictPod evicts a pod while exercising eviction limits.
// Returns true when the pod is evicted on the server side.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
strategy := ""
if ctx.Value("strategyName") != nil {
strategy = ctx.Value("strategyName").(string)
}
if pod.Spec.NodeName != "" {
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per node reached"), "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
return false
}
}
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per namespace reached"), "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
return false
}
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
if err != nil {
// err is used only for logging purposes
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
return false
}
if pod.Spec.NodeName != "" {
pe.nodepodCount[pod.Spec.NodeName]++
}
pe.namespacePodCount[pod.Namespace]++
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
if pe.dryRun {
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
} else {
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
reason := opts.Reason
if len(reason) == 0 {
reason = strategy
if len(reason) == 0 {
reason = "NotSet"
}
}
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted by sigs.k8s.io/descheduler")
}
return true
}
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
deleteOptions := &metav1.DeleteOptions{}
// GracePeriodSeconds ?
eviction := &policy.Eviction{
@@ -49,20 +184,13 @@ func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string
},
DeleteOptions: deleteOptions,
}
err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
if err == nil {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.V(3).Infof)
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: client.CoreV1().Events(pod.Namespace)})
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
r.Event(pod, v1.EventTypeNormal, "Descheduled", "pod evicted by sigs.k8s.io/descheduler")
return true, nil
} else if apierrors.IsTooManyRequests(err) {
return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
} else if apierrors.IsNotFound(err) {
return true, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
} else {
return false, err
if apierrors.IsTooManyRequests(err) {
return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
}
if apierrors.IsNotFound(err) {
return fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
}
return err
}
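For reference, a rough sketch of driving the evictor directly, the way RunDeschedulerStrategies wires it up (the limit, reason, and strategy name are illustrative; "v1" matches the group version passed by the unit tests, while the real value comes from eutils.SupportEviction):
// Sketch only: kubeClient, readyNodes, eventRecorder, and pod are assumed to exist already.
func sketchEvict(kubeClient clientset.Interface, readyNodes []*v1.Node, eventRecorder events.EventRecorder, pod *v1.Pod) bool {
	maxPerNode := uint(5) // illustrative per-node eviction budget
	podEvictor := NewPodEvictor(
		kubeClient,
		"v1",        // eviction policy group version
		false,       // dryRun
		&maxPerNode, // maxPodsToEvictPerNode
		nil,         // maxPodsToEvictPerNamespace (no limit)
		readyNodes,
		true, // metricsEnabled
		eventRecorder,
	)
	// The strategy name is currently propagated via the context (see the TODO in EvictPod above).
	ctx := context.WithValue(context.Background(), "strategyName", "PodLifeTime")
	return podEvictor.EvictPod(ctx, pod, EvictOptions{Reason: "illustrative reason"})
}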

Some files were not shown because too many files have changed in this diff.